"""Compute the minimum number of perfect squares that sum to a given natural number."""
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the least count of perfect squares whose sum equals `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))  # only squares no larger than i can contribute
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
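# A quick sanity check of the function above (hand-computed, illustrative values):
# 12 = 4 + 4 + 4 needs three squares, 13 = 4 + 9 needs two.
if __name__ == "__main__":
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(13) == 2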
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for every a from 3 to n inclusive."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
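# A minimal check of the formula above (hand-computed, illustrative values):
# solution(3) has the single term 2 * 3 * ((3 - 1) // 2) == 6, and solution(4) adds 2 * 4 * 1 == 8.
if __name__ == "__main__":
    assert solution(3) == 6
    assert solution(4) == 14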
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
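# The init file above follows the transformers lazy-import pattern: the module is
# replaced at runtime by a _LazyModule that resolves names from _import_structure on
# first attribute access, so the heavy torch backend is imported only when needed.
# A minimal sketch of the same idea (simplified, not the real _LazyModule):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the defining submodule lazily, then fetch the attribute from it.
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)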
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    """Return one representative linear layer from the first transformer block."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear module with a low-rank adapter (for testing purposes only)."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
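    # Minimal usage sketch for LoRALayer (illustrative, not part of the original tests):
    # it adds a trainable low-rank path on top of a frozen linear layer.
    def _lora_layer_demo():
        base = nn.Linear(16, 16)
        base.weight.requires_grad_(False)  # freeze the base weights
        wrapped = LoRALayer(base, rank=4)
        return wrapped(torch.randn(2, 16))  # base output + adapter output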
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        """Saving a 4-bit model was not supported at this point, so it should raise."""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """Casting a 4-bit model should keep the `keep_in_fp32` modules (here `wo`) in fp32."""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        """Load the model on two GPUs with `device_map="balanced"` and check that inference still works."""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A_ = logging.get_logger(__name__)
A_ = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
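# Hedged sketch of how the mapping above is used (optimizer and step counts illustrative):
def _scheduler_demo():
    optimizer = torch.optim.SGD([nn.Parameter(torch.zeros(1))], lr=0.1)
    schedule_func = arg_to_scheduler["linear"]
    return schedule_func(optimizer, num_warmup_steps=100, num_training_steps=1000)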
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
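# A minimal sketch of what _pad_tensors_to_max_len does (illustrative values):
# a (2, 3) tensor padded to max_length=5 keeps its first 3 columns and fills the
# rest with the pad id.
def _padding_demo():
    pad_token_id = 0
    tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])
    padded_tensor = pad_token_id * torch.ones((tensor.shape[0], 5), dtype=tensor.dtype)
    padded_tensor[:, : tensor.shape[-1]] = tensor
    return padded_tensor  # tensor([[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]])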
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 391 | 1 |
"""Tests for VisionTextDualEncoderProcessor."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
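    # Hedged usage sketch (illustrative, not one of the original tests): the processor
    # simply fans text out to the tokenizer and images out to the image processor.
    def _processor_demo(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        return processor(text="lower newer", images=self.prepare_image_inputs(), return_tensors="pt")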
"""Mahalanobis distance metric for the `datasets` library."""
import numpy as np

import datasets


_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
import datasets
a__ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
a__ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
a__ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal language modeling head."""

    # Prefix text used to give more state to XLNet and Transformer-XL on short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
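
# Hedged usage sketch (model name illustrative, not part of this module): the
# `pipeline` factory wires a tokenizer and model into the class defined above.
def _text_generation_demo():
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    return generator("Hello, my name is", max_new_tokens=10, return_full_text=False)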
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : Any=3, _lowerCamelCase : List[Any]=18, _lowerCamelCase : str=30, _lowerCamelCase : List[Any]=4_00, _lowerCamelCase : List[str]=True, _lowerCamelCase : List[Any]=None, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : List[str]=False, _lowerCamelCase : str=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], _lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5], ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size if size is not None else {'''height''': 18, '''width''': 20}
__A = do_thumbnail
__A = do_align_axis
__A = do_pad
__A = do_normalize
__A = image_mean
__A = image_std
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 18 |
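# --- editorial illustration (added; not part of the test suite above) ---
# A minimal sketch of the producer/consumer pattern the TextIteratorStreamer
# tests exercise: generation runs in a background thread and pushes decoded
# text into the streamer while the main thread iterates over it. The tiny test
# checkpoint name is the same one used above; everything else is stdlib. This
# is a hedged usage example, not transformers source.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer


def stream_generation_demo() -> str:
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    # model.generate blocks, so it runs in a worker thread; the streamer is
    # drained incrementally on the main thread as tokens are produced.
    thread = Thread(
        target=model.generate,
        kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer},
    )
    thread.start()
    text = ""
    for chunk in streamer:
        text += chunk
    thread.join()
    return text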
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV parameter into separate Q, K and V projections
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 290 | 0 |
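# --- editorial illustration (added; not part of the conversion script above) ---
# A minimal, self-contained sketch of the qkv_proj split performed in
# load_checkpoint, using a toy hidden size of 4. The tensor values are
# stand-ins; only the shapes and the torch.split call mirror the real code.
import torch as _torch

_hidden = 4
_fused = _torch.arange(3 * _hidden * _hidden, dtype=_torch.float32).reshape(3 * _hidden, _hidden)
_depth = _fused.shape[0]
assert _depth % 3 == 0
# Three contiguous (hidden, hidden) chunks; the metaseq checkpoint stores them
# in K, V, Q order even though the fused parameter is named "qkv_proj".
_k, _v, _q = _torch.split(_fused, _depth // 3, dim=0)
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)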
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 673 |
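# --- editorial illustration (added; not part of the script above) ---
# A small self-contained check of the filtering regex built above, with a
# hard-coded directory list and file names standing in for real git output.
import re as _re

_joined = "|".join(["utils", "src", "tests"])
_pattern = _re.compile(rf"^({_joined}).*?\.py$")
_candidates = ["src/transformers/modeling_utils.py", "docs/index.md", "tests/test_x.py"]
assert [f for f in _candidates if _pattern.match(f)] == [
    "src/transformers/modeling_utils.py",
    "tests/test_x.py",
]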
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 673 | 1 |
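# --- editorial illustration (added; not part of the tokenizer above) ---
# A minimal sketch of the special-token layout the methods above implement,
# on toy token ids (cls=0, sep=2). Pure Python; no tokenizer involved.
_cls, _sep = [0], [2]
_a, _b = [11, 12], [21]
# single sequence: <cls> A <sep>          pair: <cls> A <sep> B <sep>
assert _cls + _a + _sep == [0, 11, 12, 2]
assert _cls + _a + _sep + _b + _sep == [0, 11, 12, 2, 21, 2]
# token_type_ids mark the first segment (incl. its cls/sep) 0, the second 1
assert len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]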
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"

_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"

_KWARGS_DESCRIPTION = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 550 |
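# --- editorial illustration (added; not part of the metric above) ---
# The _compute logic above boils down to mean exact-match after
# canonicalization. A stand-in equivalence check (plain string equality,
# *not* the real math_equivalence.is_equiv) keeps this sketch dependency-free.
def _demo_accuracy(predictions, references, is_equiv=lambda a, b: a == b):
    n_correct = sum(1.0 if is_equiv(p, r) else 0.0 for p, r in zip(predictions, references))
    return n_correct / len(predictions)


assert _demo_accuracy(["1/2", "3"], ["1/2", "4"]) == 0.5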
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    """simple docstring"""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def a_ ( self ):
snake_case = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
snake_case = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
snake_case = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
snake_case = model.generate(__snake_case , max_length=2_0_0 , do_sample=__snake_case )
self.assertListEqual(output_ids[0].numpy().tolist() , __snake_case )
| 550 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 718 |
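# --- editorial illustration (added; not part of the conversion script above) ---
# A toy-sized sketch of the in_proj slicing done in read_in_q_k_v: a fused
# (3*hidden, hidden) attention projection is sliced row-wise into query, key
# and value blocks. Values are stand-ins; only the slicing mirrors the code.
import torch as _t

_hidden = 2
_in_proj_weight = _t.arange(3 * _hidden * _hidden, dtype=_t.float32).reshape(3 * _hidden, _hidden)
_query = _in_proj_weight[:_hidden, :]
_key = _in_proj_weight[_hidden : _hidden * 2, :]
_value = _in_proj_weight[-_hidden:, :]
assert _query.shape == _key.shape == _value.shape == (_hidden, _hidden)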
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the list ends up sorted ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """simple docstring"""
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 359 | 0 |
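# --- editorial illustration (added; not part of the module above) ---
# merge_lists above re-sorts the concatenation, which costs O((n+m) log(n+m)).
# Since both inputs are already sorted, a classic two-pointer merge does it in
# O(n + m). A hedged sketch on plain lists (not the linked-list class above):
def _merge_sorted(a: list[int], b: list[int]) -> list[int]:
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])  # at most one of the two tails is non-empty
    out.extend(b[j:])
    return out


assert _merge_sorted([1, 3, 5], [2, 2, 6]) == [1, 2, 2, 3, 5, 6]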
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """simple docstring"""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validates that the downloaded bytes decode as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 135 |
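# --- editorial illustration (added; not part of the script above) ---
# The retrieval loop above grows the requested result count geometrically
# (factor 1.5) until the index returns enough hits or a 1e4 cap is reached.
# A dependency-free sketch of that control flow with a fake query source:
def _demo_grow(target: int, available: int, factor: float = 1.5) -> int:
    num_images = int(factor * target)
    while True:
        results = min(num_images, available)  # stand-in for client.query(...)
        if results >= factor * target or num_images > 1e4:
            return num_images
        num_images = int(factor * num_images)


assert _demo_grow(target=200, available=10_000) == 300  # enough on the first try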
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 135 | 1 |
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """
    Return the first `length` hexagonal numbers, h(n) = n * (2n - 1) for n = 0 .. length - 1.
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 712 |
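# --- editorial illustration (added; not part of the module above) ---
# Hexagonal numbers are the odd-indexed triangular numbers: with
# T(k) = k * (k + 1) / 2, we get T(2n - 1) = (2n - 1) * 2n / 2 = n * (2n - 1).
# A quick cross-check of the closed form used above:
def _triangular(k: int) -> int:
    return k * (k + 1) // 2


assert all(n * (2 * n - 1) == _triangular(2 * n - 1) for n in range(1, 100))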
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Any = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 100 | 0 |
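# --- editorial illustration (added; not part of the module above) ---
# The _LazyModule pattern above defers heavy imports until an attribute is
# first accessed. A minimal stdlib sketch of the same idea uses module-level
# __getattr__ (PEP 562); this is an analogy, not the _LazyModule source.
import importlib

_LAZY_SUBMODULES = {"json_tools": "json", "math_tools": "math"}  # alias -> real module


def __getattr__(name):
    if name in _LAZY_SUBMODULES:
        return importlib.import_module(_LAZY_SUBMODULES[name])  # imported on first use
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")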
def solution() -> int:
    """
    Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    day = 6  # 6 January 1901 was the first Sunday of the range
    month = 1
    year = 1901

    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution()) | 219 |
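# --- editorial illustration (added; not part of the solution above) ---
# A stdlib cross-check of the hand-rolled calendar walk: datetime.date.weekday()
# returns 6 for Sunday, so first-of-month Sundays can be counted directly.
from datetime import date as _date

_sundays = sum(1 for y in range(1901, 2001) for m in range(1, 13) if _date(y, m, 1).weekday() == 6)
# _sundays now holds the same count the calendar walk above computes.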
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __A( unittest.TestCase ):
snake_case_ = MODEL_FOR_CAUSAL_LM_MAPPING
snake_case_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__a = text_generator('''This is a test''' , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__a = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_snake_case , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__a = text_generator('''This is a test''' , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case )
self.assertEqual(
_snake_case , [
{'''generated_token_ids''': ANY(_snake_case )},
{'''generated_token_ids''': ANY(_snake_case )},
] , )
__a = text_generator.model.config.eos_token_id
__a = '''<pad>'''
__a = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , )
self.assertEqual(
_snake_case , [
[
{'''generated_token_ids''': ANY(_snake_case )},
{'''generated_token_ids''': ANY(_snake_case )},
],
[
{'''generated_token_ids''': ANY(_snake_case )},
{'''generated_token_ids''': ANY(_snake_case )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__a = text_generator('''This is a test''' , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__a = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = TextGenerationPipeline(model=_snake_case , tokenizer=_snake_case )
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = '''Hello I believe in'''
__a = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__a = text_generator(_snake_case )
self.assertEqual(
_snake_case , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__a = text_generator(_snake_case , stop_sequence=''' fe''' )
self.assertEqual(_snake_case , [{'''generated_text''': '''Hello I believe in fe'''}] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
__a = text_generator.model
__a = text_generator.tokenizer
__a = text_generator('''This is a test''' )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__a = text_generator('''This is a test''' , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__a = pipeline(task='''text-generation''' , model=_snake_case , tokenizer=_snake_case , return_full_text=_snake_case )
__a = text_generator('''This is a test''' )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__a = text_generator('''This is a test''' , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__a = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__a = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
] , )
with self.assertRaises(_snake_case ):
__a = text_generator('''test''' , return_full_text=_snake_case , return_text=_snake_case )
with self.assertRaises(_snake_case ):
__a = text_generator('''test''' , return_full_text=_snake_case , return_tensors=_snake_case )
with self.assertRaises(_snake_case ):
__a = text_generator('''test''' , return_text=_snake_case , return_tensors=_snake_case )
# Empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which always appends EOS, so it will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__a = text_generator('''''' )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__a = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__a = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
__a = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_snake_case ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
__a = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__a = pipe('''This is a test''' )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__a = pipe('''This is a test''' )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__a = pipe('''This is a test''' )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
import torch
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
import torch
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_snake_case , top_p=0.5 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = '''Hello world'''
__a = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__a = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__a = logging.get_logger('''transformers.generation.utils''' )
__a = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_snake_case ) as cl:
__a = text_generator(_snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(_snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_snake_case ) as cl:
__a = text_generator(_snake_case , max_new_tokens=1 )
self.assertNotIn(_snake_case , cl.out )
with CaptureLogger(_snake_case ) as cl:
__a = text_generator(_snake_case , max_length=10 )
self.assertNotIn(_snake_case , cl.out ) | 219 | 1 |
"""simple docstring"""
def solution() -> str:
    """
    Project Euler 48: return the last ten digits of the series
    1**1 + 2**2 + 3**3 + ... + 1000**1000.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
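# A hedged aside: the same last ten digits can be computed with modular
# arithmetic, e.g. sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10,
# zero-padded with str(...).zfill(10) in case those ten digits start with 0.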
if __name__ == "__main__":
    print(solution()) | 714 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def snake_case__ ( _snake_case : List[str] , _snake_case : Optional[int]=0.999 , _snake_case : Optional[Any]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
UpperCamelCase__ = []
for i in range(_snake_case ):
UpperCamelCase__ = i / num_diffusion_timesteps
UpperCamelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
class lowerCAmelCase ( snake_case__ , snake_case__ ):
'''simple docstring'''
A = [e.name for e in KarrasDiffusionSchedulers]
A = 2
@register_to_config
def __init__( self :Dict , lowerCamelCase_ :int = 1_0_0_0 , lowerCamelCase_ :float = 0.00_085 , lowerCamelCase_ :float = 0.012 , lowerCamelCase_ :str = "linear" , lowerCamelCase_ :Optional[Union[np.ndarray, List[float]]] = None , lowerCamelCase_ :str = "epsilon" , lowerCamelCase_ :Optional[bool] = False , lowerCamelCase_ :Optional[bool] = False , lowerCamelCase_ :float = 1.0 , lowerCamelCase_ :str = "linspace" , lowerCamelCase_ :int = 0 , ) -> Dict:
"""simple docstring"""
if trained_betas is not None:
UpperCamelCase__ = torch.tensor(lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCamelCase__ = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase__ = betas_for_alpha_bar(lowerCamelCase_ , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCamelCase__ = betas_for_alpha_bar(lowerCamelCase_ , alpha_transform_type="exp" )
else:
raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
UpperCamelCase__ = 1.0 - self.betas
UpperCamelCase__ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = use_karras_sigmas
def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any]=None ) -> Dict:
"""simple docstring"""
if schedule_timesteps is None:
UpperCamelCase__ = self.timesteps
UpperCamelCase__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase__ = 1 if len(lowerCamelCase_ ) > 1 else 0
else:
UpperCamelCase__ = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
UpperCamelCase__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self :Dict ) -> List[str]:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self :Union[str, Any] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase__ = self.index_for_timestep(lowerCamelCase_ )
UpperCamelCase__ = self.sigmas[step_index]
UpperCamelCase__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self :Optional[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, torch.device] = None , lowerCamelCase_ :Optional[int] = None , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = num_inference_steps
UpperCamelCase__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase__ = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ = (np.arange(lowerCamelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
UpperCamelCase__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase__ = np.log(lowerCamelCase_ )
UpperCamelCase__ = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_ ) ) , lowerCamelCase_ )
if self.config.use_karras_sigmas:
UpperCamelCase__ = self._convert_to_karras(in_sigmas=lowerCamelCase_ , num_inference_steps=self.num_inference_steps )
UpperCamelCase__ = np.array([self._sigma_to_t(lowerCamelCase_ , lowerCamelCase_ ) for sigma in sigmas] )
UpperCamelCase__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCamelCase__ = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ )
UpperCamelCase__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase__ = torch.from_numpy(lowerCamelCase_ )
UpperCamelCase__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCamelCase_ ).startswith("mps" ):
# mps does not support float64
UpperCamelCase__ = timesteps.to(lowerCamelCase_ , dtype=torch.floataa )
else:
UpperCamelCase__ = timesteps.to(device=lowerCamelCase_ )
# empty dt and derivative
UpperCamelCase__ = None
UpperCamelCase__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase__ = defaultdict(lowerCamelCase_ )
def lowerCamelCase__ ( self :int , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = np.log(lowerCamelCase_ )
# get distribution
UpperCamelCase__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCamelCase__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCamelCase__ = low_idx + 1
UpperCamelCase__ = log_sigmas[low_idx]
UpperCamelCase__ = log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase__ = (low - log_sigma) / (low - high)
UpperCamelCase__ = np.clip(lowerCamelCase_ , 0 , 1 )
# transform interpolation to time range
UpperCamelCase__ = (1 - w) * low_idx + w * high_idx
UpperCamelCase__ = t.reshape(sigma.shape )
return t
def lowerCamelCase__ ( self :int , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :List[str] ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase__ = in_sigmas[-1].item()
UpperCamelCase__ = in_sigmas[0].item()
UpperCamelCase__ = 7.0 # 7.0 is the value used in the paper
UpperCamelCase__ = np.linspace(0 , 1 , lowerCamelCase_ )
UpperCamelCase__ = sigma_min ** (1 / rho)
UpperCamelCase__ = sigma_max ** (1 / rho)
UpperCamelCase__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCamelCase__ ( self :List[str] ) -> List[str]:
"""simple docstring"""
return self.dt is None
def lowerCamelCase__ ( self :List[Any] , lowerCamelCase_ :Union[torch.FloatTensor, np.ndarray] , lowerCamelCase_ :Union[float, torch.FloatTensor] , lowerCamelCase_ :Union[torch.FloatTensor, np.ndarray] , lowerCamelCase_ :bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase__ = self.index_for_timestep(lowerCamelCase_ )
# advance index counter by 1
UpperCamelCase__ = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase__ = self.sigmas[step_index]
UpperCamelCase__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCamelCase__ = self.sigmas[step_index - 1]
UpperCamelCase__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase__ = 0
UpperCamelCase__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase__ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase__ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCamelCase__ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
UpperCamelCase__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase__ = sigma_next - sigma_hat
# store for 2nd order step
UpperCamelCase__ = derivative
UpperCamelCase__ = dt
UpperCamelCase__ = sample
else:
# 2. 2nd order / Heun's method
UpperCamelCase__ = (sample - pred_original_sample) / sigma_next
UpperCamelCase__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCamelCase__ = self.dt
UpperCamelCase__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def lowerCamelCase__ ( self :Dict , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ):
# mps does not support float64
UpperCamelCase__ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCamelCase__ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCamelCase__ = self.timesteps.to(original_samples.device )
UpperCamelCase__ = timesteps.to(original_samples.device )
UpperCamelCase__ = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_ ) for t in timesteps]
UpperCamelCase__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase__ = sigma.unsqueeze(-1 )
UpperCamelCase__ = original_samples + noise * sigma
return noisy_samples
def __len__( self :Dict ) -> List[Any]:
"""simple docstring"""
return self.config.num_train_timesteps | 304 | 0 |
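# The scheduler above builds its Karras et al. noise schedule by interpolating
# between sigma_min and sigma_max in 1/rho space with rho = 7 (the paper value
# noted in the comment above). A minimal standalone sketch of that conversion,
# assuming only numpy (names and example values are illustrative):
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, num_steps)  # 0 -> 1 over the inference steps
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate between the rho-th roots, then undo the root
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.03, 14.6, 5))  # runs from sigma_max down to sigma_min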
'''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ ) ->bool:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
return True
lowercase_ = series[1] - series[0]
for index in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def A_ ( SCREAMING_SNAKE_CASE_ ) ->float:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
lowercase_ = 0
for val in series:
answer += val
return answer / len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 | '''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ ) ->bool:
if num < 0:
return False
lowercase_ = num
lowercase_ = 0
while num > 0:
lowercase_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 | 1 |
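# The row above tests numeric palindromes by reversing digits, but the dump's
# renaming collapses every assignment target into one identifier, so the
# snippet no longer runs as written. A runnable equivalent, assuming nothing
# beyond the standard library:

def is_palindrome_number(num: int) -> bool:
    if num < 0:
        return False
    num_copy, rev_num = num, 0
    while num > 0:
        rev_num = rev_num * 10 + num % 10  # append the last digit of num
        num //= 10  # drop the last digit
    return num_copy == rev_num

assert is_palindrome_number(121) and not is_palindrome_number(123)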
"""simple docstring"""
from math import factorial
def snake_case ( lowerCAmelCase_ = 100 ) -> int:
return sum(map(lowerCAmelCase_ , str(factorial(lowerCAmelCase_ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 404 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : Any = '''time_series_transformer'''
A__ : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "student_t" , __lowerCamelCase : str = "nll" , __lowerCamelCase : int = 1 , __lowerCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowerCamelCase : Optional[Union[str, bool]] = "mean" , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : int = 3_2 , __lowerCamelCase : int = 3_2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : bool = True , __lowerCamelCase : str = "gelu" , __lowerCamelCase : int = 6_4 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : int = 1_0_0 , __lowerCamelCase : float = 0.0_2 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : List[Any] , ):
"""simple docstring"""
# time series specific configuration
_snake_case = prediction_length
_snake_case = context_length or prediction_length
_snake_case = distribution_output
_snake_case = loss
_snake_case = input_size
_snake_case = num_time_features
_snake_case = lags_sequence
_snake_case = scaling
_snake_case = num_dynamic_real_features
_snake_case = num_static_real_features
_snake_case = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_snake_case = cardinality
else:
_snake_case = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_snake_case = embedding_dimension
else:
_snake_case = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_snake_case = num_parallel_samples
# Transformer architecture configuration
_snake_case = input_size * len(__lowerCamelCase ) + self._number_of_features
_snake_case = d_model
_snake_case = encoder_attention_heads
_snake_case = decoder_attention_heads
_snake_case = encoder_ffn_dim
_snake_case = decoder_ffn_dim
_snake_case = encoder_layers
_snake_case = decoder_layers
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = encoder_layerdrop
_snake_case = decoder_layerdrop
_snake_case = activation_function
_snake_case = init_std
_snake_case = use_cache
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 404 | 1 |
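# The config above defaults each categorical embedding width to
# min(50, (cardinality + 1) // 2). The same rule as a standalone sketch
# (pure Python; the example cardinalities are illustrative):

def default_embedding_dims(cardinalities):
    return [min(50, (cat + 1) // 2) for cat in cardinalities]

print(default_embedding_dims([3, 10, 1000]))  # [2, 5, 50]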
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''', _a, )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = RobertaConfig
lowerCamelCase_ : Union[str, Any] = '''roberta'''
def __init__(self , __magic_name__ ) -> List[str]:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Optional[Any] = RobertaEmbeddings(__magic_name__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''', _a, )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = RobertaConfig
lowerCamelCase_ : Optional[Any] = '''roberta'''
def __init__(self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Any = config.num_labels
snake_case_ : int = config.num_hidden_layers
snake_case_ : List[Any] = DeeRobertaModel(__magic_name__ )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Union[str, Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.num_layers
try:
snake_case_ : Any = self.roberta(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
snake_case_ : Optional[int] = outputs[1]
snake_case_ : Any = self.dropout(__magic_name__ )
snake_case_ : Any = self.classifier(__magic_name__ )
snake_case_ : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Tuple = e.message
snake_case_ : int = e.exit_layer
snake_case_ : List[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(__magic_name__ )
snake_case_ : Dict = []
snake_case_ : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Union[str, Any] = MSELoss()
snake_case_ : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : int = CrossEntropyLoss()
snake_case_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : int = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[Any] = CrossEntropyLoss()
snake_case_ : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
snake_case_ : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : int = (loss,) + outputs
if not self.training:
snake_case_ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 60 |
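# The highway heads above exit early once prediction entropy is low, but the
# imported `entropy` helper is not shown in this row. One standard softmax
# formulation of it, with a threshold check, is sketched below (assumes torch;
# the 0.5 threshold is illustrative, not the model's configured value):
import torch

def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)  # eps guards log(0)

logits = torch.tensor([[4.0, 0.1, 0.1], [1.0, 1.0, 1.0]])
confident = softmax_entropy(logits) < 0.5
print(confident)  # True for the peaked row, False for the uniform one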
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_embed
snake_case_ : Union[str, Any] = d_proj
snake_case_ : str = cutoffs + [vocab_size]
snake_case_ : int = [0] + self.cutoffs
snake_case_ : Optional[int] = div_val
snake_case_ : int = self.cutoffs[0]
snake_case_ : Any = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
snake_case_ : str = keep_order
snake_case_ : int = []
snake_case_ : Union[str, Any] = []
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
snake_case_ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(__magic_name__ )
else:
self.out_projs.append(__magic_name__ )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i)
snake_case_ : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' )
self.out_projs.append(__magic_name__ )
snake_case_ : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__magic_name__ )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = x
if proj is not None:
snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ )
return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = shape_list(__magic_name__ )
snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
if self.n_clusters == 0:
snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ )
snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(__magic_name__ )
snake_case_ : int = []
snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : str = (target >= l_idx) & (target < r_idx)
snake_case_ : Dict = tf.where(__magic_name__ )
snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx
if self.div_val == 1:
snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Union[str, Any] = self.out_layers[i][0]
snake_case_ : int = self.out_layers[i][1]
if i == 0:
snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] )
snake_case_ : Any = tf.nn.log_softmax(__magic_name__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ )
else:
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ )
snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__magic_name__ )
if target is not None:
snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__magic_name__ , -cur_logprob , shape_list(__magic_name__ ) )
snake_case_ : str = tf.concat(__magic_name__ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : int = tf.reduce_mean(__magic_name__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__magic_name__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference).
self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 60 | 1 |
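# The adaptive softmax above scores a rare word in two stages: a head
# distribution over frequent words plus one token per cluster, then a tail
# distribution inside the matched cluster; the two log-probs simply add. A
# numpy sketch of that composition (shapes and values are illustrative):
import numpy as np

def log_softmax(x):
    x = x - x.max()  # stabilise before exponentiating
    return x - np.log(np.exp(x).sum())

head = log_softmax(np.array([2.0, 1.0, 0.5]))  # [word_a, word_b, cluster_1]
tail = log_softmax(np.array([1.5, 0.2]))  # words inside cluster_1
full = head[2] + tail  # log P(cluster_1) + log P(word | cluster_1)
print(np.exp(np.concatenate([head[:2], full])).sum())  # ~1.0, a valid distribution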
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __UpperCAmelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self : List[Any] , a_ : Optional[Any]=None , **a_ : str ) -> List[Any]:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
a__ : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def UpperCAmelCase ( self : Dict , a_ : List[Any] ) -> Any:
'''simple docstring'''
import torch
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(__UpperCamelCase )
return column
def UpperCAmelCase ( self : Dict , a_ : Any ) -> List[str]:
'''simple docstring'''
import torch
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
a__ : Optional[int] = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
a__ : Union[str, Any] = {"dtype": torch.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
a__ : Optional[Any] = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
a__ : Union[str, Any] = np.asarray(__UpperCamelCase )
return torch.tensor(__UpperCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} )
def UpperCAmelCase ( self : Any , a_ : List[str] ) -> str:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(__UpperCamelCase , "__array__" ) and not isinstance(__UpperCamelCase , torch.Tensor ):
a__ : List[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def UpperCAmelCase ( self : Tuple , a_ : Tuple ) -> List[str]:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def UpperCAmelCase ( self : List[str] , a_ : str ) -> Optional[int]:
'''simple docstring'''
a__ : int = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
a__ : List[str] = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def UpperCAmelCase ( self : int , a_ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : int = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
a__ : Optional[int] = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
a__ : str = self.recursive_tensorize(__UpperCamelCase )
a__ : str = self._consolidate(__UpperCamelCase )
return column
def UpperCAmelCase ( self : List[Any] , a_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
a__ : List[Any] = self.python_features_decoder.decode_batch(__UpperCamelCase )
a__ : Union[str, Any] = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
a__ : int = self._consolidate(batch[column_name] )
return batch | 702 |
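# The formatter above promotes numpy integers and floats to fixed torch dtypes
# before building tensors. The dtype constants are renamed in this dump; the
# conventional choices for this pattern, int64 and float32, are assumed below
# (requires numpy and torch):
import numpy as np
import torch

def default_dtype_for(value: np.ndarray) -> dict:
    if np.issubdtype(value.dtype, np.integer):
        return {"dtype": torch.int64}
    if np.issubdtype(value.dtype, np.floating):
        return {"dtype": torch.float32}
    return {}  # leave everything else to torch's own inference

print(default_dtype_for(np.array([1, 2])))  # {'dtype': torch.int64}
print(default_dtype_for(np.array([1.0])))  # {'dtype': torch.float32}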
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowercase__ ( lowerCAmelCase__ : List[str] ) -> str:
'''simple docstring'''
a__ : Any = create_tensor(lowerCAmelCase__ )
a__ : Optional[Any] = gather(lowerCAmelCase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowercase__ ( lowerCAmelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
a__ : str = [state.process_index]
a__ : Optional[int] = gather_object(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == state.num_processes, F"{gathered_obj}, {len(lowerCAmelCase__ )} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}"
def lowercase__ ( lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ : str = create_tensor(lowerCAmelCase__ )
a__ : Any = broadcast(lowerCAmelCase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowercase__ ( lowerCAmelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
# Give the main process one extra element so tensor lengths actually differ
# across ranks and the padding path is exercised
if state.is_main_process:
a__ : Any = torch.arange(state.num_processes + 1 ).to(state.device )
else:
a__ : Union[str, Any] = torch.arange(state.num_processes ).to(state.device )
a__ : List[Any] = pad_across_processes(lowerCAmelCase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowercase__ ( lowerCAmelCase__ : Dict ) -> str:
'''simple docstring'''
# For now runs on only two processes
if state.num_processes != 2:
return
a__ : List[str] = create_tensor(lowerCAmelCase__ )
a__ : Union[str, Any] = reduce(lowerCAmelCase__ , "sum" )
a__ : List[str] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), F"{reduced_tensor} != {truth_tensor}"
def lowercase__ ( lowerCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
# For now runs on only two processes
if state.num_processes != 2:
return
a__ : Tuple = create_tensor(lowerCAmelCase__ )
a__ : Dict = reduce(lowerCAmelCase__ , "mean" )
a__ : Tuple = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), F"{reduced_tensor} != {truth_tensor}"
def lowercase__ ( lowerCAmelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
def lowercase__ ( ) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = PartialState()
state.print(F"State: {state}" )
state.print("testing gather" )
test_gather(lowerCAmelCase__ )
state.print("testing gather_object" )
test_gather_object(lowerCAmelCase__ )
state.print("testing broadcast" )
test_broadcast(lowerCAmelCase__ )
state.print("testing pad_across_processes" )
test_pad_across_processes(lowerCAmelCase__ )
state.print("testing reduce_sum" )
test_reduce_sum(lowerCAmelCase__ )
state.print("testing reduce_mean" )
test_reduce_mean(lowerCAmelCase__ )
if __name__ == "__main__":
main() | 251 | 0 |
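# In the gather test above, rank p holds the values [p*N + 1, ..., p*N + N]
# for N processes, so gathering across all ranks yields exactly 1..N**2. That
# invariant can be checked without a distributed backend, pure Python:

def simulate_gather(num_processes: int) -> list:
    gathered = []
    for rank in range(num_processes):
        gathered.extend(rank * num_processes + i + 1 for i in range(num_processes))
    return gathered

assert simulate_gather(4) == list(range(1, 17))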
'''simple docstring'''
from manim import *
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :str = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE__ :int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ :int = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE__ :str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ :Optional[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ :Optional[int] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :Dict = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :List[str] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = Text('CPU' , font_size=24 )
SCREAMING_SNAKE_CASE__ :Any = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :Optional[Any] = Text('GPU' , font_size=24 )
SCREAMING_SNAKE_CASE__ :Optional[Any] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ :Dict = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :Optional[int] = Text('Model' , font_size=24 )
SCREAMING_SNAKE_CASE__ :Dict = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = []
SCREAMING_SNAKE_CASE__ :str = []
for i, rect in enumerate(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ :List[Any] = fill.copy().set_fill(UpperCamelCase_ , opacity=0.8 )
target.move_to(UpperCamelCase_ )
model_arr.append(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCamelCase_ )
self.add(*UpperCamelCase_ , *UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ :List[Any] = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ :List[Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :List[str] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :Any = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE__ :str = Text('Disk' , font_size=24 )
SCREAMING_SNAKE_CASE__ :Any = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ :Optional[Any] = Square(0.3 )
input.set_fill(UpperCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCamelCase_ , buff=0.5 )
self.play(Write(UpperCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCamelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCamelCase_ ) )
self.play(FadeOut(UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ :str = Arrow(start=UpperCamelCase_ , end=UpperCamelCase_ , color=UpperCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE__ :Tuple = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE__ :List[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCamelCase_ ) , Circumscribe(model_arr[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE__ :int = AnimationGroup(
FadeOut(UpperCamelCase_ , run_time=0.5 ) , MoveToTarget(UpperCamelCase_ , run_time=0.5 ) , FadeIn(UpperCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE__ :Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE__ :int = a_c
SCREAMING_SNAKE_CASE__ :Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCamelCase_ ) , FadeOut(UpperCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE__ :str = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ , run_time=3 ) , MoveToTarget(UpperCamelCase_ ) )
self.wait()
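# The scene above animates hook-based offloading: a layer's weights move
# CPU -> GPU just before its forward pass and back again afterwards, so only
# one layer occupies GPU memory at a time. A minimal sketch of that mechanism
# with plain forward hooks (assumes torch; on a CPU-only machine the moves
# degenerate to no-ops, which keeps the sketch runnable anywhere):
import torch
from torch import nn

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Sequential(*[nn.Linear(8, 8) for _ in range(3)])  # parked on CPU

def attach_offload_hooks(module: nn.Module) -> None:
    def load(m, args):
        m.to(device)  # nn.Module.to is in-place; return None so args are kept
    def evict(m, args, output):
        m.to("cpu")  # free the GPU copy once this layer has run
    module.register_forward_pre_hook(load)
    module.register_forward_hook(evict)

for layer in model:
    attach_offload_hooks(layer)

out = model(torch.randn(2, 8).to(device))  # runs layer by layer on `device`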
| 209 | '''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def __init__( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : int = 32 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_55 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCamelCase_ : bool = True , UpperCamelCase_ : int=7 , UpperCamelCase_ : Dict=30 , UpperCamelCase_ : Tuple=4_00 , UpperCamelCase_ : List[Any]=3 , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :List[str] = parent
SCREAMING_SNAKE_CASE__ :Tuple = do_resize
SCREAMING_SNAKE_CASE__ :List[Any] = size if size is not None else {'shortest_edge': 2_88}
SCREAMING_SNAKE_CASE__ :str = size_divisor
SCREAMING_SNAKE_CASE__ :Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ :Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE__ :str = do_normalize
SCREAMING_SNAKE_CASE__ :int = do_center_crop
SCREAMING_SNAKE_CASE__ :Optional[Any] = image_mean
SCREAMING_SNAKE_CASE__ :str = image_std
SCREAMING_SNAKE_CASE__ :Optional[Any] = do_pad
SCREAMING_SNAKE_CASE__ :Tuple = batch_size
SCREAMING_SNAKE_CASE__ :List[str] = num_channels
SCREAMING_SNAKE_CASE__ :Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ :Optional[Any] = max_resolution
def __lowerCamelCase ( self : Tuple ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCamelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ :Dict = self.size['shortest_edge']
SCREAMING_SNAKE_CASE__ :List[Any] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = image.size
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ :Optional[int] = size / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :List[Any] = size, scale * w
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = scale * h, size
SCREAMING_SNAKE_CASE__ :Any = int((13_33 / 8_00) * size )
if max(UpperCamelCase_ , UpperCamelCase_ ) > max_size:
SCREAMING_SNAKE_CASE__ :Tuple = max_size / max(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = newh * scale
SCREAMING_SNAKE_CASE__ :Any = neww * scale
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ :Any = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
SCREAMING_SNAKE_CASE__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ : List[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ :List[str] = BridgeTowerImageProcessingTester(self )
@property
def __lowerCamelCase ( self : int ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase_ , 'size_divisor' ) )
def __lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
pass
def __lowerCamelCase ( self : int ) -> Dict:
# Initialize image processor
SCREAMING_SNAKE_CASE__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ :str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ :Any = image_processing(UpperCamelCase_ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[Any] ) -> Tuple:
# Initialize image processor
SCREAMING_SNAKE_CASE__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ :str = image_processing(UpperCamelCase_ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Any = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[str] ) -> List[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ :List[Any] = image_processing(UpperCamelCase_ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 209 | 1 |
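# The expected-shape helper above encodes BridgeTower's resize rule: scale the
# shortest edge to `size`, cap the longest edge at (1333 / 800) * size, then
# snap both sides down to multiples of `size_divisor`. The same arithmetic as
# a standalone function, pure Python:

def expected_resize(h: int, w: int, size: int = 288, size_divisor: int = 32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resize(480, 640))  # (288, 384): the shortest edge hits 288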
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = "▁"
__UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =BertGenerationTokenizer
UpperCAmelCase_ =False
UpperCAmelCase_ =True
def _UpperCamelCase ( self ) -> Dict:
super().setUp()
SCREAMING_SNAKE_CASE_ = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = '''<s>'''
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def _UpperCamelCase ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = BertGenerationTokenizer(_A , keep_accents=_A )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _UpperCamelCase ( self ) -> int:
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = '''Hello World!'''
SCREAMING_SNAKE_CASE_ = [18536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
SCREAMING_SNAKE_CASE_ = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def _UpperCamelCase ( self ) -> List[Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
SCREAMING_SNAKE_CASE_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE_ = ''' '''.join(_A )
SCREAMING_SNAKE_CASE_ = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = BertGenerationConfig()
SCREAMING_SNAKE_CASE_ = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def _UpperCamelCase ( self ) -> Dict:
# fmt: off
SCREAMING_SNAKE_CASE_ = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 597 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
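# Minimal usage sketch (added for illustration, not part of the original module):
# config = WavLMConfig()
# assert config.inputs_to_logits_ratio == 320  # 5 * 2**6 from the default conv strides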
| 597 | 1 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Find the longest common substring of two strings with dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
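# Example (can be pasted into a REPL; not part of the original file):
# >>> longest_common_substring("abcdef", "xabded")
# 'ab'
# "ab" and "de" both have length 2; "ab" is kept because the scan over text1 finds it first.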
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
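# Example (illustrative; "load_pipeline" is a hypothetical object name, not from this repo):
# create_dummy_object("load_pipeline", '["torch"]') is lowercase, so it renders the
# DUMMY_FUNCTION template; an ALL_CAPS name would render DUMMY_CONSTANT, anything else DUMMY_CLASS.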
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : Harris free parameter, usually in the range [0.04, 0.06]
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with corner pixels marked and a list of [x, y, response] corners.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # was hardcoded to 0.04 here, which silently ignored the configured value
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
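# Note (added for clarity): the Harris response is r = det(M) - k * trace(M)^2 for the
# 2x2 structure tensor M = [[wxx, wxy], [wxy, wyy]] accumulated over the window; a large
# positive r marks a corner, a large negative r an edge, and near-zero r a flat region.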
if __name__ == "__main__":
__UpperCamelCase : str = HarrisCorner(0.0_4, 3)
__UpperCamelCase , __UpperCamelCase : List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 372 |
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build and measure an entangled (GHZ) state on `qubits` qubits, returning the counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
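    # For a GHZ state only the all-zeros and all-ones strings should appear, so the printed
    # counts look roughly like {'000': ~500, '111': ~500} for 3 qubits and 1000 shots.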
| 372 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 630 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 630 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 423 |
def factorial(num: int) -> int:
    """Compute num! by repeated multiplication."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of `number` and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler style)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 423 | 1 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
if number > 0:
raise ValueError("""input must be a negative integer""" )
UpperCamelCase : Optional[Any] = len(bin(_lowerCAmelCase )[3:] )
UpperCamelCase : str = bin(abs(_lowerCAmelCase ) - (1 << binary_number_length) )[3:]
UpperCamelCase : Any = (
(
"""1"""
+ """0""" * (binary_number_length - len(_lowerCAmelCase ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
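# Minimal sketch of a concrete subcommand (illustrative; EnvCommand is a hypothetical
# name, not defined in this file):
#
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         env_parser = parser.add_parser("env")
#         env_parser.set_defaults(func=lambda args: EnvCommand())
#
#     def run(self):
#         print("collect and print environment info here")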
| 333 | 0 |
'''simple docstring'''

import random

from .binary_exp_mod import bin_exp_mod


# Probabilistic (Miller-Rabin) primality check for n, repeated with `prec` random witnesses.
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
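# Note (added for clarity): each random witness wrongly reports "probably prime" with
# probability at most 1/4, so `prec` independent rounds drive the error below 4**(-prec);
# prec=1000 is far more than needed in practice.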
if __name__ == "__main__":
__A = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 61 |
'''simple docstring'''


def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Calculate the present value of a stream of cash flows at the given discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
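# Worked example (not in the original file): present_value(0.05, [100.0, 200.0]) == 290.48,
# because enumerate starts at i=0, so the first cash flow lands at t=0 undiscounted:
# 100 + 200 / 1.05 = 290.476... -> rounded to 290.48.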
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 61 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
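# Usage note (added for clarity): with the lazy module installed in sys.modules, the heavy
# torch/vision submodules are only imported on first attribute access, e.g.
# `from transformers.models.mask2former import Mask2FormerConfig` stays cheap until used.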
| 451 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
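# Note (added for clarity): for the default task, CamembertOnnxConfig(...).inputs maps both
# "input_ids" and "attention_mask" to the dynamic axes {0: "batch", 1: "sequence"}, so batch
# size and sequence length remain flexible in the exported ONNX graph.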
| 451 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        # note: the original condition `if "nyu" or "midas" in checkpoint_url` was always
        # true; the intended membership test is restored here
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each layer's fused qkv matrix into separate query, key and value tensors."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
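# A minimal sketch (illustrative helper, not part of the original script) of the
# fused-qkv split performed above: with hidden_size = 4, a (12, 4) qkv weight
# yields three (4, 4) blocks for query, key and value, in that order.
def _demo_qkv_split():
    qkv = torch.arange(48.0).reshape(12, 4)
    hidden_size = 4
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (4, 4)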
def prepare_img():
    """Download an image of two cats to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original DPT weights into our DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 668 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
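# `_import_structure` maps each submodule to the public names it exports; the
# `_LazyModule` installed at the bottom defers the actual imports until one of
# these attributes is first accessed, keeping the top-level import cheap.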
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 1 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    # factor num - 1 as 2**t * s with s odd
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
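# Worked example of the decomposition above: for num = 221, num - 1 = 220 = 2**2 * 55,
# so s = 55 and t = 2. A witness a for which pow(a, 55, 221) is neither 1 nor 220,
# and whose repeated squaring never reaches 220, proves 221 = 13 * 17 composite.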
def is_prime_low_num(num: int) -> bool:
    """Trial-divide by the primes below 1000, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_024) -> int:
    """Return a random prime of roughly `keysize` bits."""
    while True:
        # prime density near 2**keysize is about 1 / (keysize * ln 2), so on
        # average only a few hundred candidates are tested for keysize = 1024
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 169 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, **kwargs):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 169 | 1 |
| 169 | 1 |
"""Find the position of the rightmost set bit of an integer."""
from math import log2
def get_rightmost_set_bit_position(number: int) -> int:
    """Return the zero-based index of the lowest set bit (0 is returned for input 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    # number & -number isolates the lowest set bit; log2 gives its position
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
"""Fisher-Yates style in-place shuffling."""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by performing len(data) random pair swaps."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
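# Note: the loop above performs len(data) random pair swaps, which is simple but
# not the textbook Fisher-Yates and does not yield a perfectly uniform
# distribution over permutations. A sketch of the unbiased classic variant:
def fisher_yates_shuffle_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data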
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs) | 16 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """encrypt_key is an NxN numpy array"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)
    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ''''''
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''''''.join(
                self.replace_digits(num) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # find the multiplicative inverse of det modulo 36 by brute force
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''''''
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''''''.join(
                self.replace_digits(num) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input('''Enter the order of the encryption key: '''))
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
    option = input('''\n1. Encrypt\n2. Decrypt\n''')
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''')
        print('''Your encrypted text is:''')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''')
        print('''Your decrypted text is:''')
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 443 | 0 |
'''Convert PoolFormer checkpoints from the original repository.'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''', f'''block.{new_block_num}.{layer_num}.{new_name}''')
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f'''patch_embeddings.{total_embed_found}.''')
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Load an image of two cats from COCO to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into our PoolFormer structure."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'''Size {size} not supported''')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f'''Converting model {model_name}...''')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'''Size {size} not supported''')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 703 |
'''Text justification: fully justify words into lines of a given width.'''
def text_justification(word: str, max_width: int) -> list:
    """Split `word` into fully justified lines of exactly `max_width` characters."""
    words = word.split()
    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 465 | 0 |
def add(first: int, second: int) -> int:
    '''Add two integers using only bitwise AND, XOR and shifts.'''
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
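# Worked trace for add(5, 3): carry 1 -> first 6, second 2; carry 2 -> first 4,
# second 4; carry 4 -> first 0, second 8; carry 0 -> loop exits with first == 8.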
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(F"""{add(first, second) = }""")
| 306 |
import re
def indian_phone_validator(phone: str) -> bool:
    """Determine whether the string is a valid Indian phone number."""
    pat = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 412 | 0 |
'''Longest increasing subsequence length in O(n log n).'''
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Smallest index m in (l, r] with v[m] >= key, found via binary search."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    # tail[i] holds the smallest possible tail value of an increasing
    # subsequence of length i + 1 seen so far
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest starting value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] becomes the new, smaller tail of an existing length
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
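    # Illustrative run: the LIS of [2, 5, 3, 7, 11, 8, 10, 13, 6] is
    # [2, 3, 7, 8, 10, 13], so this prints 6.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))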
| 358 |
'''Re-exports for the masked (pruned) BERT research code.'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 358 | 1 |
'''Regular expression matching with "." and "*" via dynamic programming.'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True when `pattern` ("." matches any char, "x*" zero or more x's) matches the whole string."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
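    # Two quick sanity checks: "." matches any one character and "x*" matches zero
    # or more x's, so ".*" accepts any string while a bare "a" cannot absorb "ab".
    assert match_pattern("ab", ".*")
    assert not match_pattern("ab", "a")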
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''') | 44 |
'''Polynomial evaluation: naive power sum versus Horner's method.'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing-degree order) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule, one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
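# Worked check for the demo below: with poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0, both evaluators return 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 = 79800.0.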
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 676 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
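# Design note: `mapping` starts out as the identity on the HF keys, and every pass
# rewrites only the value (the eventual SD key) in place, so the substring renames
# compose and the ordering of the conversion maps above is load-bearing.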
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"mid.attn_1.{weight_name}.weight" in k:
                print(F"Reshaping {k} for SD format" )
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # single-character code: "q", "k" or "v"
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCamelCase : List[Any] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
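    # Example invocation (paths are hypothetical):
    #   python convert_diffusers_to_original_stable_diffusion.py \
    #       --model_path ./my-diffusers-model \
    #       --checkpoint_path ./model.safetensors --half --use_safetensors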
    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load weights from safetensors if they exist; otherwise fall back to the PyTorch bins
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path) | 649 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
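# Illustrative programmatic use (output folder is hypothetical; the URL is the
# default checkpoint below):
#   convert_dit_checkpoint(
#       "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
#       "./dit-base",
#       push_to_hub=False,
#   )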
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 649 | 1 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    """simple docstring"""
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    """simple docstring"""
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
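# Minimal usage sketch (illustrative; assumes an in-memory `datasets.Dataset`):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "data.parquet").write()  # returns bytes written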
| 13 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
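# Usage sketch (illustrative):
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]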
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but this one also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 703 | import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 34 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
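    # Note (illustrative): with the toy vocab above, "lower newer" tokenizes to
    # ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]; "\u0120" (Ġ) is the
    # byte-level marker GPT-2 uses to encode a leading space.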
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level,
        # and to get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, )
                sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
| 530 | """simple docstring"""
def is_even(number: int) -> bool:
    """Return True if `number` is even, by checking the lowest bit of its binary form."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
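# Usage sketch (illustrative):
#   is_even(4)  # True
#   is_even(7)  # False, since 7 & 1 == 1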
| 530 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP='true', MASTER_ADDR='localhost', MASTER_PORT='10999', RANK='0', LOCAL_RANK='0', WORLD_SIZE='1', )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
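    # Illustrative sketch (not part of the test suite): the plugin reads its
    # configuration from environment variables, so e.g. FSDP_SHARDING_STRATEGY="1"
    # resolves to ShardingStrategy.FULL_SHARD:
    #
    #   with mockenv_context(**self.dist_env, FSDP_SHARDING_STRATEGY="1"):
    #       from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
    #       assert FullyShardedDataParallelPlugin().sharding_strategy == ShardingStrategy.FULL_SHARD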
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            'fsdp_shard_grad_op_transformer_based_wrap',
            'fsdp_full_shard_transformer_based_wrap',
        ]
        self.peak_memory_usage_upper_bound = {
            'multi_gpu_fp16': 3200,
            'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
            'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps'])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, 'test_performance.py')
        cmd = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f'--fsdp_sharding_strategy={i+1}')
                    break

            if "fp32" in config:
                cmd_config.append('--mixed_precision=no')
            else:
                cmd_config.append('--mixed_precision=fp16')

            if "cpu_offload" in config:
                cmd_config.append('--fsdp_offload_params=True')

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f'--fsdp_auto_wrap_policy={policy}')
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer')
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('--fsdp_min_num_params=2000')

            cmd_config.extend(
                [
                    self.test_file_path,
                    f'--output_dir={self.tmpdir}',
                    f'--performance_lower_bound={self.performance_lower_bound}',
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, 'test_checkpointing.py')
        cmd = [
            'accelerate',
            'launch',
            '--num_processes=2',
            '--num_machines=1',
            '--machine_rank=0',
            '--use_fsdp',
            '--mixed_precision=fp16',
            '--fsdp_transformer_layer_cls_to_wrap=BertLayer',
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f'--fsdp_sharding_strategy={i+1}')
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f'--fsdp_state_dict_type={state_dict_type}')
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f'--output_dir={self.tmpdir}',
                        '--partial_train_epoch=1',
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, 'epoch_0')
                cmd_config.extend(
                    [
                        f'--resume_from_checkpoint={resume_from_checkpoint}',
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, 'test_peak_memory_usage.py')
        cmd = [
            'accelerate',
            'launch',
            '--num_processes=2',
            '--num_machines=1',
            '--machine_rank=0',
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(['--mixed_precision=fp16'])
            else:
                cmd_config.extend(['--mixed_precision=no'])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(['--use_fsdp'])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f'--fsdp_sharding_strategy={i+1}')
                        break

                if "cpu_offload" in spec:
                    cmd_config.append('--fsdp_offload_params=True')

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f'--fsdp_auto_wrap_policy={policy}')
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer')
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append('--fsdp_min_num_params=2000')

            cmd_config.extend(
                [
                    self.test_file_path,
                    f'--output_dir={self.tmpdir}',
                    f'--peak_memory_upper_bound={peak_mem_upper_bound}',
                    f'--n_train={self.n_train}',
                    f'--n_val={self.n_val}',
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 347 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class a_ ( snake_case ):
UpperCAmelCase : Optional[int] = """markuplm"""
def __init__( self : Any , a_ : Optional[int]=3_0_5_2_2 , a_ : Optional[int]=7_6_8 , a_ : List[Any]=1_2 , a_ : int=1_2 , a_ : int=3_0_7_2 , a_ : int="gelu" , a_ : str=0.1 , a_ : Tuple=0.1 , a_ : List[str]=5_1_2 , a_ : Any=2 , a_ : Union[str, Any]=0.0_2 , a_ : Any=1E-1_2 , a_ : Optional[int]=0 , a_ : str=0 , a_ : Optional[Any]=2 , a_ : Optional[Any]=2_5_6 , a_ : Tuple=1_0_2_4 , a_ : List[str]=2_1_6 , a_ : List[str]=1_0_0_1 , a_ : Optional[int]=3_2 , a_ : Optional[int]=5_0 , a_ : Optional[int]="absolute" , a_ : Union[str, Any]=True , a_ : List[Any]=None , **a_ : Union[str, Any] , ) -> Optional[int]:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
snake_case: Tuple =vocab_size
snake_case: int =hidden_size
snake_case: Optional[int] =num_hidden_layers
snake_case: List[str] =num_attention_heads
snake_case: Dict =hidden_act
snake_case: List[Any] =intermediate_size
snake_case: List[str] =hidden_dropout_prob
snake_case: Dict =attention_probs_dropout_prob
snake_case: List[Any] =max_position_embeddings
snake_case: Optional[int] =type_vocab_size
snake_case: Optional[int] =initializer_range
snake_case: List[str] =layer_norm_eps
snake_case: Optional[int] =position_embedding_type
snake_case: int =use_cache
snake_case: Tuple =classifier_dropout
# additional properties
snake_case: Dict =max_depth
snake_case: Optional[Any] =max_xpath_tag_unit_embeddings
snake_case: List[str] =max_xpath_subs_unit_embeddings
snake_case: Optional[int] =tag_pad_id
snake_case: List[str] =subs_pad_id
snake_case: str =xpath_unit_hidden_size
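# Minimal usage sketch (assumption: this class is exposed as
# transformers.MarkupLMConfig; the values checked are the defaults above):
#
#   from transformers import MarkupLMConfig
#   config = MarkupLMConfig()
#   assert config.hidden_size == 768 and config.max_depth == 50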
| 347 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A ( self ) -> Optional[Any]:
a_ , a_ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
a_ : Tuple = "A painting of a squirrel eating a burger"
a_ : List[str] = jax.device_count()
a_ : Union[str, Any] = num_samples * [prompt]
a_ : Any = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
a_ : str = replicate(_SCREAMING_SNAKE_CASE )
a_ : str = shard(_SCREAMING_SNAKE_CASE )
a_ : Any = jax.random.PRNGKey(0 )
a_ : str = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
a_ : List[Any] = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=2_5 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
a_ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a_ : str = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
a_ : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a_ : str = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A ( self ) -> Union[str, Any]:
a_ : int = "stabilityai/stable-diffusion-2"
a_ , a_ : Optional[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder="scheduler" )
a_ , a_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , revision="bf16" , dtype=jnp.bfloataa , )
a_ : List[str] = scheduler_params
a_ : Union[str, Any] = "A painting of a squirrel eating a burger"
a_ : int = jax.device_count()
a_ : List[Any] = num_samples * [prompt]
a_ : Any = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = replicate(_SCREAMING_SNAKE_CASE )
a_ : Any = shard(_SCREAMING_SNAKE_CASE )
a_ : int = jax.random.PRNGKey(0 )
a_ : int = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
a_ : Optional[Any] = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=2_5 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
a_ : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a_ : Union[str, Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
a_ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a_ : int = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 473 | """simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature: stores audio samples as {"bytes": ..., "path": ...} structs."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert "PCM-byte" to "WAV-byte", the sampling rate must be known
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
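# Minimal usage sketch (assumption: this module is datasets/features/audio.py and
# Audio is re-exported from the top-level `datasets` package):
#
#   from datasets import Audio, Dataset
#   ds = Dataset.from_dict({"audio": ["clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}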
| 473 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """Tool that synthesizes speech from English text with SpeechT5."""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
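# Minimal usage sketch (hedged: assumes the agents tooling registers this class
# under the "text-to-speech" task id):
#
#   from transformers import load_tool
#   reader = load_tool("text-to-speech")
#   audio = reader("This is a test")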
| 58 | '''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})

        new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}'
        )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version']
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')

        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json'
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
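# Illustrative sketch (hedged; relies only on the `update_from_string` behavior
# exercised in test_config_from_string above):
#
#   from transformers import GPT2Config
#   c = GPT2Config()
#   c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")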
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively sorts the first `n` elements of `collection` in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Inserts the '(index - 1)th' element into its sorted position."""
    # Checks the order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
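# Worked example (hedged sketch, not from the original file):
#
#   data = [3, 1, 2]
#   rec_insertion_sort(data, len(data))
#   # data is now [1, 2, 3]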
if __name__ == "__main__":
UpperCAmelCase : Tuple = input("Enter integers separated by spaces: ")
UpperCAmelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 567 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt matching: returns True if `pattern` occurs in `text`."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Computes the longest proper prefix/suffix table used by KMP."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
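# Quick sanity check (hedged; mirrors Test 5 below):
#
#   assert get_failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]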
if __name__ == "__main__":
# Test 1)
lowerCAmelCase : List[str] ='''abc1abc12'''
lowerCAmelCase : Tuple ='''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCAmelCase : Any ='''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCAmelCase : int ='''ABABX'''
lowerCAmelCase : int ='''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCAmelCase : Optional[Any] ='''AAAB'''
lowerCAmelCase : str ='''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCAmelCase : Any ='''abcdabcy'''
lowerCAmelCase : str ='''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCAmelCase : Optional[Any] ='''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 172 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
a_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def __lowerCAmelCase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE = get_cluster_input()
return config
def __lowerCAmelCase ( _UpperCamelCase : Any=None ) -> Dict:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser('config' , description=_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Accelerate config command' , description=_UpperCamelCase )
parser.add_argument(
'--config_file' , default=_UpperCamelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCamelCase )
return parser
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE = args.config_file
else:
if not os.path.isdir(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(_UpperCamelCase )
else:
config.to_yaml_file(_UpperCamelCase )
print(f"""accelerate configuration saved at {config_file}""" )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = config_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
config_command(_UpperCamelCase )
if __name__ == "__main__":
main()
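# Hedged CLI sketch: this module backs the subcommand
#
#   accelerate config --config_file /path/to/default_config.yaml
#
# which walks through the prompts above and writes the answers to the given file
# (YAML by default, JSON if the filename ends in .json).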
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 0 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
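# Hedged illustration: a mostly-English message should score close to the maximum
# of 12, while uniform noise tends toward 0, e.g.
#
#   english_freq_match_score("The quick brown fox jumps over the lazy dog")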
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
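# Minimal usage sketch (assumption: exported as transformers.PegasusConfig):
#
#   from transformers import PegasusConfig
#   cfg = PegasusConfig()
#   assert cfg.hidden_size == cfg.d_model == 1024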
| 116 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: List[str] = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,  # the large expected-encoding literal above keeps its original name
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: List[Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
UpperCAmelCase: List[str] = "Tämä on testi"
UpperCAmelCase: Any = "This is a test"
UpperCAmelCase: Dict = [7_6, 7, 2_0_4_7, 2]
UpperCAmelCase: List[str] = [6_9, 1_2, 1_1, 9_4_0, 2]
UpperCAmelCase: str = tokenizer(__snake_case ).input_ids
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase: Dict = tokenizer(text_target=__snake_case ).input_ids
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase: Tuple = tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
| 705 |
# Imports
import numpy as np


class IndexCalculation:
    """Calculates vegetation indices (NDVI, GNDVI, etc.) from spectral band arrays."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
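# Minimal usage sketch (hedged; band matrices would normally come from a raster):
#
#   band = np.array([[50.0, 60.0], [70.0, 80.0]])
#   calc = IndexCalculation(red=band, green=band, blue=band, nir=2 * band)
#   print(calc.calculation("NDVI"))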
| 166 | 0 |
def gray_code(bit_count: int) -> list:
    """Takes in an integer n and returns the n-bit gray code sequence as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Generates the gray code sequence of `bit_count` bits as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
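# Worked example (hedged): gray_code(2) yields [0, 1, 3, 2], i.e. the strings
# ["00", "01", "11", "10"] converted from base 2.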
if __name__ == "__main__":
import doctest
doctest.testmod()
| 521 | import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """
    Power iteration: find the largest eigenvalue and corresponding eigenvector
    of `input_matrix` given a starting `vector` in the same space.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.

        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
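# Minimal usage sketch (hedged):
#
#   A = np.array([[2.0, 1.0], [1.0, 3.0]])
#   value, vec = power_iteration(A, np.array([1.0, 1.0]))
#   # `value` approximates the dominant eigenvalue of A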
| 398 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    """Rename a single key of `state_dict` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move the backbone weights under the `backbone.conv_encoder.model` prefix."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split the fused in-projection matrices into separate query/key/value projections."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize `image` so that its longer side matches the size the checkpoint expects."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor normalized with the ImageNet mean and std."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
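# A minimal sketch of the preprocessing pipeline above; the blank image is purely
# illustrative (the conversion below downloads a real example file instead):
#
#     dummy = Image.new("RGB", (1200, 800), color="white")
#     pixel_values = normalize(resize(dummy, "pubtables1m_detection_detr_r18.pth")).unsqueeze(0)
#     # for detection checkpoints the longer side is scaled to 800 -> shape (1, 3, 533, 800)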
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original model's weights into our Table Transformer structure."""
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
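    # Example invocation (a sketch; the script filename and output folder are placeholders):
    #
    #     python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
    #         --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #         --pytorch_dump_folder_path ./table-transformer-detection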
| 258 |
"""simple docstring"""
__magic_name__ = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
__magic_name__ = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
__magic_name__ = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__magic_name__ = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
__magic_name__ = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
__magic_name__ = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
__magic_name__ = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
__magic_name__ = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
| 258 | 1 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of `input_list`, i.e. its most frequent value(s), sorted ascending."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
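# For example, mode([2, 2, 3]) -> [2] and mode([1, 2]) -> [1, 2]
# (ties for the highest count keep every tied value, sorted ascending).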
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GLPN model."""

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
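# Usage sketch (values are the defaults defined above):
#
#     config = GLPNConfig()
#     config.hidden_sizes         # -> [32, 64, 160, 256]
#     config.decoder_hidden_size  # -> 64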
| 176 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BEiT model."""

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
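# Usage sketch: the defaults above reproduce the base BEiT configuration, e.g.
#
#     config = BeitConfig()
#     (config.image_size, config.patch_size, config.vocab_size)  # -> (224, 16, 8192)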
| 179 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_lowerCAmelCase :List[Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 179 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """A ControlNet model implemented in Flax."""

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
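# Minimal usage sketch (shapes follow `init_weights` above; the zero-initialized
# projection blocks make an untrained ControlNet contribute nothing at first):
#
#     controlnet = FlaxControlNetModel()
#     params = controlnet.init_weights(jax.random.PRNGKey(0))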
| 689 |
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in blocks sharing the same power of two; compute how
        # many blocks are needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
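# The sequence built above starts 3, 5, 9, 13, 17, 25, 33, 41, ...;
# e.g. proth(6) == 25, since 25 = 3 * 2**3 + 1 with odd k = 3 < 2**3.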
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
    value = 0
try:
        value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
| 124 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
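    # Example invocation (a sketch; the script filename and paths are placeholders):
    #
    #     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
    #         --gpt2_checkpoint_path /path/to/tf_checkpoint \
    #         --pytorch_dump_folder_path ./gpt2-pytorch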
| 222 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoBERTa-PreLayerNorm model."""

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
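# Usage sketch (the defaults above mirror roberta-base):
#
#     config = RobertaPreLayerNormConfig()
#     config.vocab_size  # -> 50265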
| 222 | 1 |
import requests
_snake_case : Optional[Any] = "YOUR API KEY"
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase = giphy_api_key ):
__snake_case : Optional[int] = "+".join(query.split() )
__snake_case : List[Any] = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
__snake_case : Any = requests.get(__lowerCamelCase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 81 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated amount of each resource across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need vector back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm over the configured tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align the display of the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
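# Example run (a sketch using the test tables defined above; any keyword
# argument set to True makes `main` print the pretty tables first):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)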
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix filled with 1..n*n (defaults to 4x4)."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 710 |
class EditDistance:
    """
    Use:
        solver             = EditDistance()
        editDistanceResult = solver.min_dist_top_down(firstString, secondString)
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
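# A worked example: the edit distance between "intention" and "execution" is 5
# (intention -> inention -> enention -> exention -> exection -> execution).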
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 253 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    # two tensor protos are considered equal iff they match with names blanked out
    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Remove duplicate initializers from an ONNX model and save the optimized copy next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
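# Usage sketch (the path is a placeholder):
#
#     optimized_path = remove_dup_initializers("model.onnx")
#     # writes "optimized_model.onnx" next to the input file and returns its path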
| 332 |
def binomial_coefficient(n, r):
    """Compute C(n, r) with Pascal's rule, using only O(r) extra space."""
    # the current row of Pascal's triangle, truncated after position r
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
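# C(10, 5) = 10! / (5! * 5!) = 252, so the call above prints 252.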
| 569 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoBERTa tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
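# Minimal usage sketch (downloads the hosted vocab on first use; ids shown are
# what roberta-base is expected to produce):
#
#     tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#     tokenizer("Hello world")["input_ids"]  # -> [0, 31414, 232, 2]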
| 519 |
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce number of attribute look-ups in `while` loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
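# FIFO behaviour sketch:
#
#     q = QueueByTwoStacks([10, 20])
#     q.put(30)
#     q.get()   # -> 10 (first in, first out)
#     len(q)    # -> 2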
if __name__ == "__main__":
from doctest import testmod
testmod()
| 519 | 1 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether a queen can be placed at board[row][column] without conflicts."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking whenever no safe column exists."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
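# For n = 8 the search above prints each arrangement and ends with 92,
# the known number of distinct 8-queens solutions.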
| 40 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig( PretrainedConfig ):
    model_type = "tapas"

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , type_vocab_sizes=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
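# Illustrative usage sketch (not part of the original file): a config for
# weakly supervised aggregation fine-tuning, in the spirit of the WTQ setup.
example_config = TapasConfig(
    num_aggregation_labels=4,
    use_answer_as_supervision=True,
    aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
)
assert example_config.aggregation_labels[1] == "SUM"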
| 666 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__UpperCAmelCase : List[str] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig( PretrainedConfig ):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self , vocab_size: int = 250002 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 514 , initializer_range: float = 0.02 , pad_token_id: int = 1 , layer_norm_eps: float = 1E-0_5 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
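# Illustrative (not part of the original file): via `attribute_map`, the
# alias `dropout` resolves to `classifier_dropout` on config instances.
example_config = ErnieMConfig(classifier_dropout=0.2)
assert example_config.dropout == 0.2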
| 705 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments( parser ):
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )


def set_default_quantizers( args ):
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f"""Invalid calibrator {args.calibrator}""" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model )
def enable_calibration( model ):
    """Switch all quantizer modules into calibration mode."""
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"""{name:80}: {module}""" )


def finish_calibration( model , args ):
    """Load the calibrated amax values and switch quantizers back to quant mode."""
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    def fusea( qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )

    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )


def expand_amax( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def recalibrate_weights( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=25 , line_width=180 , ignore=None ):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f"""Act:{input_q.extra_repr()}"""
        wgt_str = f"""Wgt:{weight_q.extra_repr()}"""
        line = f"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(line ) <= line_width:
            logger.info(line )
        else:
            logger.info(f"""{name:{name_width}} {act_str}""" )
            logger.info(f"""{" ":{name_width}} {wgt_str}""" )
def print_quant_summary( model ):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f"""{name:80} {mod}""" )
            count += 1
    print(f"""{count} TensorQuantizers found in model""" )


def set_quantizer( name , mod , quantizer , k , v ):
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f"""{name} has no {quantizer}""" )


def set_quantizers( name , mod , which='''both''' , **kwargs ):
    s = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += f""" {k}={v}"""
        if which in ["input", "both"]:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )


def set_quantizer_by_name( model , names , **kwargs ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = f"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += f""" {k}={v}"""
                        setattr(mod , k , v )
                    logger.info(s )
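# Standalone sketch of the pytorch_quantization primitives used above
# (illustrative, not part of the original file): descriptors fix the bit
# width and calibration scheme before any QuantLinear layers are created.
if __name__ == "__main__":
    example_desc = QuantDescriptor(num_bits=8, calib_method="histogram")
    quant_nn.QuantLinear.set_default_quant_desc_input(example_desc)
    example_layer = quant_nn.QuantLinear(16, 32)  # linear layer with fake-quantized input and weight
    print(example_layer)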
| 249 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
    def __init__( self , label_idx=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file( self , data_dir: str , mode: Union[Split, str] ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples

    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def get_labels( self , path: str ):
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def get_labels( self , path: str ):
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
    def read_examples_from_file( self , data_dir , mode: Union[Split, str] ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels( self , path: str ):
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
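# Illustrative (not part of the original file): the NER reader above expects
# one token per line with the label in the configured column, sentences
# separated by blank lines, e.g. CoNLL-2003:
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#
# Each sentence becomes one InputExample:
example = InputExample(guid="train-1", words=["EU", "rejects"], labels=["B-ORG", "O"])
assert example.words == ["EU", "rejects"]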
| 7 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCAmelCase__ : str = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Union[str, Any] , **snake_case_ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**snake_case_ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(snake_case_ )
def __magic_name__ ( self : Dict , **snake_case_ : Dict ):
'''simple docstring'''
snake_case__ : Tuple = {}
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = {}
# preprocess args
if "points_per_batch" in kwargs:
snake_case__ : Any = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
snake_case__ : List[str] = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
snake_case__ : int = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
snake_case__ : int = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
snake_case__ : List[str] = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
snake_case__ : Union[str, Any] = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
snake_case__ : int = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
snake_case__ : Optional[Any] = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
snake_case__ : List[str] = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
snake_case__ : int = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
snake_case__ : str = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
snake_case__ : Union[str, Any] = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : str , snake_case_ : Optional[int] , *snake_case_ : Any , snake_case_ : str=None , snake_case_ : List[Any]=None , **snake_case_ : List[Any] ):
'''simple docstring'''
return super().__call__(snake_case_ , *snake_case_ , num_workers=snake_case_ , batch_size=snake_case_ , **snake_case_ )
def __magic_name__ ( self : List[str] , snake_case_ : Any , snake_case_ : Optional[Any]=6_4 , snake_case_ : int = 0 , snake_case_ : float = 5_1_2 / 1_5_0_0 , snake_case_ : Optional[int] = 3_2 , snake_case_ : Optional[int] = 1 , ):
'''simple docstring'''
snake_case__ : Dict = load_image(snake_case_ )
snake_case__ : Optional[int] = self.image_processor.size['''longest_edge''']
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.image_processor.generate_crop_boxes(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self.image_processor(images=snake_case_ , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
snake_case__ : List[str] = self.get_inference_context()
with inference_context():
snake_case__ : Dict = self._ensure_tensor_on_device(snake_case_ , device=self.device )
snake_case__ : Tuple = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
snake_case__ : str = image_embeddings
snake_case__ : Dict = grid_points.shape[1]
snake_case__ : int = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
for i in range(0 , snake_case_ , snake_case_ ):
snake_case__ : str = grid_points[:, i : i + points_per_batch, :, :]
snake_case__ : Optional[Any] = input_labels[:, i : i + points_per_batch]
snake_case__ : List[str] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def __magic_name__ ( self : List[str] , snake_case_ : List[str] , snake_case_ : Optional[Any]=0.8_8 , snake_case_ : Dict=0.9_5 , snake_case_ : List[str]=0 , snake_case_ : Dict=1 , ):
'''simple docstring'''
snake_case__ : Union[str, Any] = model_inputs.pop('''input_boxes''' )
snake_case__ : Union[str, Any] = model_inputs.pop('''is_last''' )
snake_case__ : List[str] = model_inputs.pop('''original_sizes''' ).tolist()
snake_case__ : int = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
snake_case__ : List[Any] = self.model(**snake_case_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
snake_case__ : Optional[int] = model_outputs['''pred_masks''']
snake_case__ : Optional[Any] = self.image_processor.post_process_masks(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , binarize=snake_case_ )
snake_case__ : str = model_outputs['''iou_scores''']
snake_case__ , snake_case__ , snake_case__ : Any = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def __magic_name__ ( self : List[str] , snake_case_ : Optional[int] , snake_case_ : List[str]=False , snake_case_ : int=False , snake_case_ : Tuple=0.7 , ):
'''simple docstring'''
snake_case__ : Tuple = []
snake_case__ : str = []
snake_case__ : Optional[int] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
snake_case__ : Union[str, Any] = torch.cat(snake_case_ )
snake_case__ : Dict = torch.cat(snake_case_ )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = self.image_processor.post_process_for_mask_generation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Tuple = defaultdict(snake_case_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(snake_case_ )
snake_case__ : str = {}
if output_rle_mask:
snake_case__ : Union[str, Any] = rle_mask
if output_bboxes_mask:
snake_case__ : Union[str, Any] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 347 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark( key: str ):
    """Mark the function with a key code so the register can dispatch on it."""
    def decorator( func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func

    return decorator


def mark_multiple( *keys: List[str] ):
    """Mark the function with several key codes so the register can dispatch on them."""
    def decorator( func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func

    return decorator
class KeyHandler( type ):
    """Metaclass that collects the key handlers registered on a class's methods."""
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input( cls ):
        """Finds and returns the registered handler for the pressed key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None


def register( cls ):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , ) | 26 | 0 |
def catalan_numbers( upper_limit: int ) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
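# Cross-check sketch (not in the original): the closed form
# C(n) = (2n)! / ((n + 1)! * n!) must agree with the DP table above.
def _catalan_closed_form(n: int) -> int:
    import math

    return math.factorial(2 * n) // (math.factorial(n + 1) * math.factorial(n))


assert catalan_numbers(10) == [_catalan_closed_form(i) for i in range(11)]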
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod() | 327 |
def solution( n: int = 60_08_51_47_51_43 ) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
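# Worked example (illustrative, not in the original): 13195 = 5 * 7 * 13 * 29,
# so its largest prime factor is 29; the default Euler #3 input yields 6857.
assert solution(13195) == 29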
if __name__ == "__main__":
print(F"""{solution() = }""") | 327 | 1 |
import sys
def matrix_chain_order( array ):
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution( optimal_solution , i , j ):
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )
    print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
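# Reference check (illustrative, not in the original): for the dimension
# array [30, 35, 15, 5, 10, 20, 25] the classic CLRS example needs a minimum
# of 15125 scalar multiplications, parenthesized as ((A1(A2A3))((A4A5)A6)).
_m, _s = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert _m[1][6] == 15125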
| 705 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ["""torch""", """torchsde"""]
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
requires_backends(cls , ['torch', 'torchsde'] )
| 542 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """Returns the KOQV parameters of (self-)attention for one layer."""
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP parameters of one layer. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_b = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_a, wi_b)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    """Returns the layer norm parameter of one layer."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def lowerCamelCase__ ( a , *, a , a ):
__snake_case = traverse_util.flatten_dict(variables['target'] )
__snake_case = {'/'.join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__snake_case = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , a )
__snake_case = collections.OrderedDict()
# Shared embeddings.
__snake_case = old['token_embedder/embedding']
# Encoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__snake_case = tax_layer_norm_lookup(a , a , 'encoder' , 'pre_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(a , a , 'encoder' , 'attention' )
__snake_case = layer_norm
__snake_case = k.T
__snake_case = o.T
__snake_case = q.T
__snake_case = v.T
# Block i, layer 1 (MLP).
__snake_case = tax_layer_norm_lookup(a , a , 'encoder' , 'pre_mlp_layer_norm' )
__snake_case , __snake_case = tax_mlp_lookup(a , a , 'encoder' , a )
__snake_case = layer_norm
if split_mlp_wi:
__snake_case = wi[0].T
__snake_case = wi[1].T
else:
__snake_case = wi.T
__snake_case = wo.T
__snake_case = old[
'encoder/relpos_bias/rel_embedding'
].T
__snake_case = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__snake_case = tax_layer_norm_lookup(a , a , 'decoder' , 'pre_self_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(a , a , 'decoder' , 'self_attention' )
__snake_case = layer_norm
__snake_case = k.T
__snake_case = o.T
__snake_case = q.T
__snake_case = v.T
# Block i, layer 1 (Cross Attention).
__snake_case = tax_layer_norm_lookup(a , a , 'decoder' , 'pre_cross_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(a , a , 'decoder' , 'encoder_decoder_attention' )
__snake_case = layer_norm
__snake_case = k.T
__snake_case = o.T
__snake_case = q.T
__snake_case = v.T
# Block i, layer 2 (MLP).
__snake_case = tax_layer_norm_lookup(a , a , 'decoder' , 'pre_mlp_layer_norm' )
__snake_case , __snake_case = tax_mlp_lookup(a , a , 'decoder' , a )
__snake_case = layer_norm
if split_mlp_wi:
__snake_case = wi[0].T
__snake_case = wi[1].T
else:
__snake_case = wi.T
__snake_case = wo.T
__snake_case = old['decoder/decoder_norm/scale']
__snake_case = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__snake_case = old['decoder/logits_dense/kernel'].T
return new
def lowerCamelCase__ ( a , a ):
__snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__snake_case = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__snake_case = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__snake_case = state_dict['shared.weight']
return state_dict
def lowerCamelCase__ ( a , a , a , a ):
__snake_case = checkpoints.load_tax_checkpoint(a )
__snake_case = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__snake_case = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def lowerCamelCase__ ( a , a , a , a = False ):
__snake_case = TaConfig.from_json_file(a )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__snake_case = TaEncoderModel(a )
else:
__snake_case = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print('Done' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
_lowercase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
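# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_encoder_only  # only for encoder-only checkpoints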
| 356 |
'''simple docstring'''
def hamming( n_element: int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('n_element should be a positive number' )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
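# Quick check (illustrative, not in the original): the first ten Hamming
# numbers, i.e. the smallest integers of the form 2^i * 3^j * 5^k.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]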
if __name__ == "__main__":
n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("""-----------------------------------------------------""")
| 356 | 1 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
a__: Tuple = threading.Lock()
a__: Optional[logging.Handler] = None
a__: List[Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
a__: Tuple = logging.WARNING
a__: Optional[int] = True
def UpperCamelCase__( )->int:
env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def UpperCamelCase__( )->str:
return __name__.split('''.''' )[0]
def UpperCamelCase__( )->logging.Logger:
return logging.getLogger(_get_library_name() )
def UpperCamelCase__( )->None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
A__ = logging.StreamHandler() # Set sys.stderr as stream.
A__ = sys.stderr.flush
# Apply our default configuration to the library root logger.
A__ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
A__ = False
def UpperCamelCase__( )->None:
global _default_handler
with _lock:
if not _default_handler:
return
A__ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
A__ = None
def UpperCamelCase__( )->Tuple:
return log_levels
def UpperCamelCase__( UpperCamelCase__ : Optional[str] = None )->logging.Logger:
if name is None:
A__ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCamelCase__ )
def UpperCamelCase__( )->int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase__( UpperCamelCase__ : int )->None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCamelCase__ )
def UpperCamelCase__( )->str:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->List[str]:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->int:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->Dict:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCamelCase__( )->None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCamelCase__( UpperCamelCase__ : logging.Handler )->None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCamelCase__ )
def UpperCamelCase__( UpperCamelCase__ : logging.Handler )->None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCamelCase__ )
def UpperCamelCase__( )->None:
_configure_library_root_logger()
A__ = False
def UpperCamelCase__( )->None:
_configure_library_root_logger()
A__ = True
def UpperCamelCase__( )->None:
A__ = _get_library_root_logger().handlers
for handler in handlers:
A__ = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(UpperCamelCase__ )
def UpperCamelCase__( )->None:
A__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCamelCase__ )
def UpperCamelCase__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple )->Tuple:
A__ = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , UpperCamelCase__ )
if no_advisory_warnings:
return
self.warning(*UpperCamelCase__ , **UpperCamelCase__ )
a__: Dict = warning_advice
@functools.lru_cache(UpperCamelCase__ )
def UpperCamelCase__( self : Optional[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] )->Optional[Any]:
self.warning(*UpperCamelCase__ , **UpperCamelCase__ )
a__: Any = warning_once
class SCREAMING_SNAKE_CASE__ :
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ): # pylint: disable=unused-argument
A__ = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self,__lowerCamelCase ):
def empty_fn(*__lowerCamelCase,**__lowerCamelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
return
class SCREAMING_SNAKE_CASE__ :
def __call__( self,*__lowerCamelCase,**__lowerCamelCase ):
if _tqdm_active:
return tqdm_lib.tqdm(*__lowerCamelCase,**__lowerCamelCase )
else:
return EmptyTqdm(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
A__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
a__: str = _tqdm_cls()
def UpperCamelCase__( )->bool:
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase__( )->Dict:
global _tqdm_active
A__ = True
hf_hub_utils.enable_progress_bars()
def UpperCamelCase__( )->Optional[int]:
global _tqdm_active
A__ = False
hf_hub_utils.disable_progress_bars()
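# Illustrative usage sketch. The definitions above mirror
# `transformers.utils.logging`; with its public names
# (`get_logger`, `set_verbosity_info`, ...) the intended usage is:
#
#   logger = get_logger(__name__)
#   set_verbosity_info()
#   logger.info("message routed through the library root logger")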
| 721 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__: Any = random.Random()
def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : str=1.0 , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None )->Any:
if rng is None:
A__ = global_rng
A__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self,__lowerCamelCase,__lowerCamelCase=7,__lowerCamelCase=400,__lowerCamelCase=2000,__lowerCamelCase=2048,__lowerCamelCase=128,__lowerCamelCase=1,__lowerCamelCase=512,__lowerCamelCase=30,__lowerCamelCase=4_4100,):
A__ = parent
A__ = batch_size
A__ = min_seq_length
A__ = max_seq_length
A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ = spectrogram_length
A__ = feature_size
A__ = num_audio_channels
A__ = hop_length
A__ = chunk_length
A__ = sampling_rate
def UpperCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase ( self,__lowerCamelCase=False,__lowerCamelCase=False ):
def _flatten(__lowerCamelCase ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
A__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = TvltFeatureExtractor
def UpperCamelCase ( self ):
A__ = TvltFeatureExtractionTester(self )
def UpperCamelCase ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCamelCase,'''spectrogram_length''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''feature_size''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''num_audio_channels''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''hop_length''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''chunk_length''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''sampling_rate''' ) )
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset  # local import keeps this helper self-contained

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 212 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, projection layers 0 and 2 map
            # onto linear1 and linear2 on the transformers side.
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer when loading
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
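# Worked example (illustrative, with a hypothetical checkpoint key): after the
# KEYS_TO_MODIFY_MAPPING pass, "text_branch.sequential.3.weight" becomes
# "text_model.sequential.3.weight"; the sequential-pattern rewrite then yields
# "text_model.layers.1.linear.weight", since int("3") // 3 == 1. A fused qkv
# weight under an audio key is split along dim 0 into three equally sized
# query/key/value tensors.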
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 25 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
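# Each fixture below is a tuple of operations; an operation is a callable from the
# `operator` module plus its arguments. For example, _set("key_a", "val_a") expands to
# (setitem, "key_a", "val_a"), and _run_operation(d, setitem, "key_a", "val_a") mutates
# `d` in place and returns (None, None), because `setitem` itself returns None.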
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
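# With HashMap(initial_block_size=4) as used in the test below, inserting five items is
# enough to force a resize up, and deleting all of them afterwards exercises the
# resize-down path as well.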
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names | 25 | 1 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal that appends node values to `res`."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sorts `arr` by building a binary search tree and reading it back in order."""
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
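# Example: tree_sort([10, 1, 3, 2, 9, 14, 13]) returns [1, 2, 3, 9, 10, 13, 14].
# Average time complexity is O(n log n); the worst case is O(n^2) for already
# sorted input, where the unbalanced BST degenerates into a linked list.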
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 441 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
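# Illustrative usage sketch (the checkpoint name is an assumption, not part of this file):
#
#   from transformers import pipeline
#   classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   predictions = classifier("path/to/video.mp4", top_k=3)
#
# Each prediction is a dict of the form {"score": float, "label": str}.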
| 441 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6,
                 encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024,
                 decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu",
                 d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
                 init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
                 num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True,
                 two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1,
                 bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5,
                 giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
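# Minimal usage sketch: `attribute_map` above aliases hidden_size/num_attention_heads,
# so the reads below resolve to d_model and encoder_attention_heads.
#
#   config = DetaConfig()
#   assert config.hidden_size == config.d_model
#   assert config.num_attention_heads == config.encoder_attention_heads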
| 572 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
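# Minimal sketch: the default configuration uses divided space-time attention.
#
#   config = TimesformerConfig()
#   assert config.attention_type == "divided_space_time"
#   assert config.num_frames == 8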
| 572 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
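# Hypothetical call site (the exact op signature is an assumption, not defined in this
# file): the compiled extension exposes fused multi-scale deformable attention kernels,
# e.g.
#
#   MSDA = load_cuda_kernels()
#   out = MSDA.ms_deform_attn_forward(
#       value, spatial_shapes, level_start_index, sampling_locations, attention_weights, im2col_step
#   )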
| 634 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
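# For example, floats_list((2, 3)) returns a 2x3 nested list of floats drawn uniformly
# from [0.0, 1.0); passing scale=2.0 widens the range to [0.0, 2.0).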
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
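    # With the defaults above, seq_length_diff = (2000 - 400) // (7 - 1) = 266, so inputs
    # prepared by `prepare_inputs_for_common` grow from 400 towards 2000 across the batch.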
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected_input_features = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(
            np.allclose(input_features[0, 0, : len(expected_input_features)], expected_input_features, atol=1e-4)
        )
| 634 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt"
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 265 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
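# A concrete task subclasses TokenClassificationTask. A minimal, hypothetical NER task
# for CoNLL-style files (one "token label" pair per line, blank lines between sentences)
# would implement read_examples_from_file by grouping lines into one InputExample per
# sentence, and get_labels by returning the label inventory, e.g.
# ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"].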
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token, sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
def __len__( self ) -> int:
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use -100 as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase = tf.data.Dataset.from_generator(
lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowerCAmelCase = tf.data.Dataset.from_generator(
lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 532 | 0 |
from manim import *
class A ( lowerCamelCase_ ):
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_ = Rectangle(height=0.25 , width=0.25 )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('CPU' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
UpperCamelCase_ = [mem.copy() for i in range(4 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('GPU' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('Model' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
UpperCamelCase_ = []
UpperCamelCase_ = []
for i, rect in enumerate(__UpperCAmelCase ):
UpperCamelCase_ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
target.move_to(__UpperCAmelCase )
model_arr.append(__UpperCAmelCase )
UpperCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
UpperCamelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCamelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('Disk' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
UpperCamelCase_ = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) )
UpperCamelCase_ = Square(0.3 )
input.set_fill(__UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
self.play(Write(__UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(__UpperCAmelCase ) )
self.play(FadeOut(__UpperCAmelCase ) )
UpperCamelCase_ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCamelCase_ = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
UpperCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCamelCase_ = AnimationGroup(
FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCamelCase_ = a_c
UpperCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
UpperCamelCase_ = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
self.wait()
| 721 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]=13 , __UpperCAmelCase : str=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Any=99 , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : Union[str, Any]=36 , __UpperCAmelCase : Optional[int]=6 , __UpperCAmelCase : Union[str, Any]=6 , __UpperCAmelCase : List[str]=6 , __UpperCAmelCase : Union[str, Any]=37 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Optional[int]=4 , __UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = embedding_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_hidden_groups
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowercase__ ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ = AlbertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , sentence_order_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowercase__ ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = AlbertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = AlbertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = AlbertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = AlbertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : int = True
def lowercase__ ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
UpperCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
UpperCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = AlbertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel.from_pretrained('albert-base-v2' )
UpperCamelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
UpperCamelCase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) )
| 559 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def _a ( self ):
"""simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        geluaa = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
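        # gelu_10 clips the activation at 10; wherever the clip is inactive the two curves must agree exactly.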
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _a ( self ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
def _a ( self ):
"""simple docstring"""
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        self.assertEqual(act1.a , 1 )
        # The registry must hand out distinct instances, so `act2` has no attribute `a`.
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 536 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = BlenderbotSmallTokenizer
__snake_case = False
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
A__ : Union[str, Any] =["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
A__ : Dict =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : List[str] =["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
A__ : Optional[Any] ={"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def lowercase__ ( self : List[Any] , **lowerCAmelCase_ : int ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[Any] ="""adapt act apte"""
A__ : Any ="""adapt act apte"""
return input_text, output_text
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ : List[str] ="""adapt act apte"""
A__ : Union[str, Any] =["""adapt""", """act""", """ap@@""", """te"""]
A__ : List[str] =tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
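        # Vocab insertion order above gives __start__=0, adapt=1, act=2, ap@@=3, te=4, __end__=5.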
A__ : str =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [13_84]
A__ : str ="""I am a small frog."""
A__ : Dict =tok([src_text] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
A__ : int =tok.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : str =BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
A__ : Dict ="""I am a small frog ."""
A__ : Union[str, Any] ="""."""
A__ : Optional[int] =tok(lowerCAmelCase_ )["""input_ids"""]
A__ : List[str] =tok(lowerCAmelCase_ )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 215 | 0 |
from __future__ import annotations
def __lowerCAmelCase ( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
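    """
    Solve the semiconductor mass-action law n * p = n_i**2 for whichever of the
    three carrier concentrations was passed as zero.  Illustrative doctests:

    >>> __lowerCAmelCase(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    ('intrinsic_conc', 50.0)
    >>> __lowerCAmelCase(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
    ('electron_conc', 25.0)
    >>> __lowerCAmelCase(electron_conc=1000, hole_conc=0, intrinsic_conc=200)
    ('hole_conc', 40.0)
    """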
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 189 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] = logging.get_logger(__name__)
def __lowerCAmelCase ( snake_case : Any , snake_case : int ) -> Dict:
__lowerCamelCase: Tuple = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def __lowerCAmelCase ( snake_case : str , snake_case : Union[str, Any] ) -> Tuple:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__lowerCamelCase: Union[str, Any] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
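        # The DeiT checkpoint fuses q, k and v into one (3 * hidden_size, hidden_size) matrix;
        # the slices below split it back into the three separate projections.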
__lowerCamelCase: Any = in_proj_weight[
: encoder_config.hidden_size, :
]
__lowerCamelCase: Dict = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__lowerCamelCase: Any = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __lowerCAmelCase ( snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[int] ) -> int:
__lowerCamelCase: List[str] = dct.pop(snake_case )
__lowerCamelCase: Tuple = val
def __lowerCAmelCase ( snake_case : Dict ) -> Any:
if "handwritten" in checkpoint_url:
__lowerCamelCase: Tuple = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCamelCase: Union[str, Any] = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
__lowerCamelCase: Union[str, Any] = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def __lowerCAmelCase ( snake_case : List[Any] , snake_case : Any ) -> List[str]:
__lowerCamelCase: Union[str, Any] = ViTConfig(image_size=384 , qkv_bias=snake_case )
__lowerCamelCase: Optional[int] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__lowerCamelCase: Optional[int] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
__lowerCamelCase: Optional[int] = 1024
__lowerCamelCase: int = 4096
__lowerCamelCase: Tuple = 24
__lowerCamelCase: Optional[Any] = 16
__lowerCamelCase: str = 1024
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCamelCase: Any = False
__lowerCamelCase: Optional[Any] = """relu"""
__lowerCamelCase: List[Any] = 1024
__lowerCamelCase: List[str] = True
__lowerCamelCase: int = False
__lowerCamelCase: int = False
# load HuggingFace model
__lowerCamelCase: Any = ViTModel(snake_case , add_pooling_layer=snake_case )
__lowerCamelCase: Dict = TrOCRForCausalLM(snake_case )
__lowerCamelCase: Any = VisionEncoderDecoderModel(encoder=snake_case , decoder=snake_case )
model.eval()
# load state_dict of original model, rename some keys
__lowerCamelCase: Any = torch.hub.load_state_dict_from_url(snake_case , map_location="""cpu""" , check_hash=snake_case )["""model"""]
__lowerCamelCase: Optional[Any] = create_rename_keys(snake_case , snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_q_k_v(snake_case , snake_case )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__lowerCamelCase: int = state_dict.pop(snake_case )
if key.startswith("""decoder""" ) and "output_projection" not in key:
__lowerCamelCase: Any = val
else:
__lowerCamelCase: List[str] = val
# load state dict
model.load_state_dict(snake_case )
# Check outputs on an image
__lowerCamelCase: Any = ViTImageProcessor(size=encoder_config.image_size )
__lowerCamelCase: Tuple = RobertaTokenizer.from_pretrained("""roberta-large""" )
__lowerCamelCase: List[Any] = TrOCRProcessor(snake_case , snake_case )
__lowerCamelCase: Optional[int] = processor(images=prepare_img(snake_case ) , return_tensors="""pt""" ).pixel_values
# verify logits
__lowerCamelCase: Tuple = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__lowerCamelCase: Dict = model(pixel_values=snake_case , decoder_input_ids=snake_case )
__lowerCamelCase: Any = outputs.logits
__lowerCamelCase: Dict = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
__lowerCamelCase: Tuple = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
__lowerCamelCase: Optional[int] = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
__lowerCamelCase: Union[str, Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
__lowerCamelCase: Union[str, Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , snake_case , atol=1e-3 ), "First elements of logits not as expected"
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case )
if __name__ == "__main__":
_A : int = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_A : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 189 | 1 |
import inspect
import unittest
class lowerCAmelCase_ ( unittest.TestCase ):
    def test_diffusers_import( self ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
    def test_backend_registration( self ):
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE_ : int = 'k-diffusion'
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'invisible-watermark'
assert backend in deps, F'{backend} is not in the deps table!'
| 105 |
def __UpperCAmelCase ( input_str : str ) -> bool:
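    """
    Return True when every character of `input_str` is unique, using a single
    arbitrary-precision integer as a bitmap over Unicode code points.

    >>> __UpperCAmelCase("abcde")
    True
    >>> __UpperCAmelCase("aab")
    False
    """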
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 0
for ch in input_str:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ord(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = pow(2 , lowerCamelCase_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ : List[str] ) ->Tuple:
    '''simple docstring'''
    return EnvironmentCommand()
def download_command_factory( args : Any ) ->Union[str, Any]:
    '''simple docstring'''
    return EnvironmentCommand(args.accelerate_config_file )
class _lowerCAmelCase ( __A ):
'''simple docstring'''
@staticmethod
def __lowercase ( UpperCamelCase_ : ArgumentParser ) -> List[str]:
'''simple docstring'''
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
def __init__( self : str , UpperCamelCase_ : Optional[int] , *UpperCamelCase_ : Optional[Any] ) -> None:
'''simple docstring'''
_lowercase : Any = accelerate_config_file
def __lowercase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Tuple = '''not installed'''
if is_safetensors_available():
import safetensors
_lowercase : Dict = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
_lowercase : List[Any] = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
_lowercase : List[Any] = '''not installed'''
_lowercase : Optional[Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowercase : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase_ ):
_lowercase : str = load_config_from_file(self._accelerate_config_file ).to_dict()
_lowercase : Any = (
'''\n'''.join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
else F"\t{accelerate_config}"
)
_lowercase : str = '''not installed'''
_lowercase : Union[str, Any] = '''NA'''
if is_torch_available():
import torch
_lowercase : Optional[int] = torch.__version__
_lowercase : Tuple = torch.cuda.is_available()
_lowercase : int = '''not installed'''
_lowercase : Union[str, Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
_lowercase : Optional[int] = tf.__version__
try:
# deprecated in v2.1
_lowercase : int = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowercase : Dict = bool(tf.config.list_physical_devices('''GPU''' ) )
_lowercase : Any = '''not installed'''
_lowercase : str = '''not installed'''
_lowercase : Any = '''not installed'''
_lowercase : Tuple = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
_lowercase : int = flax.__version__
_lowercase : Tuple = jax.__version__
_lowercase : Dict = jaxlib.__version__
_lowercase : Dict = jax.lib.xla_bridge.get_backend().platform
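            # xla_bridge reports which platform ("cpu", "gpu" or "tpu") the default JAX backend runs on.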
_lowercase : Optional[int] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"{safetensors_version}",
'''Accelerate version''': F"{accelerate_version}",
'''Accelerate config''': F"{accelerate_config_str}",
'''PyTorch version (GPU?)''': F"{pt_version} ({pt_cuda_available})",
'''Tensorflow version (GPU?)''': F"{tf_version} ({tf_cuda_available})",
'''Flax version (CPU?/GPU?/TPU?)''': F"{flax_version} ({jax_backend})",
'''Jax version''': F"{jax_version}",
'''JaxLib version''': F"{jaxlib_version}",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(UpperCamelCase_ ) )
return info
@staticmethod
def __lowercase ( UpperCamelCase_ : Optional[Any] ) -> int:
'''simple docstring'''
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : str = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
_lowercase : List[str] = {
'''input_ids''': tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
_lowercase : Any = model(UpperCamelCase_ )['''last_hidden_state''']
_lowercase : List[Any] = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice.
_lowercase : Optional[int] = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 411 | 0 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
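# debug_launcher runs the launch path in-process with a CPU-only config, so these smoke tests need no GPU.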
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_cpu( self : Optional[Any] ):
"""simple docstring"""
debug_launcher(test_script.main )
    def test_ops( self : str ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 497 |
'''simple docstring'''
import os
import sys
__magic_name__ : str = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__magic_name__ : List[Any] = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
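# Illustrative torch.hub usage of the entry points above (repo path and checkpoint name are assumptions):
# import torch
# gpt2 = torch.hub.load("huggingface/pytorch-transformers", "modelForCausalLM", "gpt2")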
| 497 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
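# The modeling entries are only registered below when torch is importable, keeping bare imports cheap.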
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 616 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCamelCase :
def __init__( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int = 1_3 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , __UpperCAmelCase : int = 1_2_8 , __UpperCAmelCase : Optional[int]=[1_6, 3_2, 6_4, 1_2_8] , __UpperCAmelCase : int = 7 , __UpperCAmelCase : int = 4 , __UpperCAmelCase : int = 3_7 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 1_0 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_2_8 , __UpperCAmelCase : List[int] = [2, 2, 2, 2] , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = encoder_stride
SCREAMING_SNAKE_CASE__ = num_attention_outputs
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = embed_dim + 1
SCREAMING_SNAKE_CASE__ = resolution
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = hidden_sizes
SCREAMING_SNAKE_CASE__ = dim
SCREAMING_SNAKE_CASE__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = TFEfficientFormerModel(config=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = TFEfficientFormerForImageClassification(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = TFEfficientFormerForImageClassification(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : List[Any] = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = TFEfficientFormerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
def check_hidden_states_output(__UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int ):
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
SCREAMING_SNAKE_CASE__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE__ = seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ = outputs.decoder_hidden_states
            self.assertIsInstance(__UpperCAmelCase , (list, tuple) )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = getattr(self.model_tester , """seq_length""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = getattr(self.model_tester , """decoder_seq_length""" , __UpperCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any=False ) -> List[str]:
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFEfficientFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = getattr(self.model_tester , """seq_length""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = getattr(self.model_tester , """encoder_seq_length""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = getattr(self.model_tester , """key_length""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = getattr(self.model_tester , """chunk_length""" , __UpperCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
SCREAMING_SNAKE_CASE__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__UpperCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.assertTrue(outputs_dict is not None )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowerCamelCase (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""tf""" )
# forward pass
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""tf""" )
# forward pass
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 616 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=1_28 , a__=32 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
def snake_case_ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
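    # Note: the decoder variant above reuses the encoder inputs and adds cross-attention
    # tensors; setting `config.is_decoder = True` is what routes the model through the
    # decoder code path in the checks below.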
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels,
        choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
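# Minimal usage sketch (hypothetical driver code, mirroring how the test case below
# consumes the tester):
#
#   tester = NezhaModelTester(parent=some_test_case)
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#   model = NezhaModel(config)
#   outputs = model(**inputs_dict)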
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21_128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 650 |
| 650 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
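# Example (assuming this file is transformers/models/xlnet/__init__.py): after the
# _LazyModule swap above, `from transformers.models.xlnet import XLNetModel` only
# imports the heavy torch submodule on first attribute access.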
| 650 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # Each rank builds a distinct slice: rank 0 -> [1..n], rank 1 -> [n+1..2n], ...
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process contributes one extra element so the shorter ranks get padded.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # The expected values below assume exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main() | 512 | '''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result) == sorted(expected) | 512 | 1 |
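# For reference, a minimal union-find Kruskal with the signature the test above assumes
# (`kruskal(num_nodes, edges)` over weighted [u, v, w] edges); this is a sketch, not the
# imported implementation:


def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        # path-compressing find
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # adding the edge keeps the forest acyclic
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst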
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # suppress SQLAlchemy 2.0 deprecation chatter for tests that request this fixture
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True) | 189 |
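# Note on the hooks above: with the default "unit" marking, `pytest -m unit` and
# `pytest -m integration` select the corresponding groups, and the autouse cache
# fixtures keep test downloads out of the real Hugging Face cache directories.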
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
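# Usage sketch (assuming the class names above): default construction matches the
# roberta-base architecture, and the ONNX config exposes the dynamic axes:
#
#   config = RobertaConfig()
#   onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes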
| 173 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
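# Worked example of the split above: a fused qkv_proj weight of shape (1536, 512)
# has depth 1536, so torch.split(value, 512, dim=0) yields three (512, 512) blocks
# that land in k_proj, v_proj and q_proj respectively (note the K, V, Q order).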
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
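    # Example invocation (script name and paths are placeholders):
    #   python convert_opt_checkpoint.py \
    #       --fairseq_path /path/to/model.pt \
    #       --pytorch_dump_folder_path ./opt-hf \
    #       --hf_config facebook/opt-350m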
| 703 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 542 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
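# Usage sketch (hypothetical file name; assumes PIL is installed):
#
#   from PIL import Image
#   processor = BlipImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 384, 384) with the defaults above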
| 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
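    # Example launch (placeholder paths; `swag` is assumed to be one of the registered
    # processors in utils_multiple_choice):
    #   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
    #       --data_dir ./data/swag --output_dir ./swag_out --max_seq_length 80 --do_train --do_eval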
| 268 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
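    # The tiny hand-built configs above keep these fast tests runnable on CPU; real
    # checkpoints are only exercised in the @slow integration tests further down.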
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 462 | 0 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix ):
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
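# Hedged usage sketch (added for illustration; not part of the original file):
# min_path_sum mutates its input and returns the cheapest top-left to
# bottom-right path cost, moving only right or down. For [[1, 2], [3, 4]] the
# cheapest path is 1 -> 2 -> 4, so:
# >>> min_path_sum([[1, 2], [3, 4]])
# 7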
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 259 | 1 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term , power):
    """simple docstring"""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        series.append(F'''1 / {pow(temp + 1 , int(power))}''' if series else '''1''')
    return series
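# Hedged example (added for illustration; not in the original): p_series(5, 2)
# should return the first five 1/n^2 terms as strings:
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']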
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = int(input("Enter the last number (nth term) of the P-Series"))
lowercase_ = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 713 |
'''simple docstring'''
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """simple docstring"""
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    """simple docstring"""
    return any(digit in '''02468''' for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """simple docstring"""
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    """simple docstring"""
    return len(find_circular_primes())
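# Hedged worked example (added commentary): 197 is a circular prime because all
# of its rotations (197, 971, 719) are prime, while any candidate containing an
# even digit is filtered out early by contains_an_even_digit, since some
# rotation of it would end in that digit and be divisible by 2.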
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
| 352 | 0 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'''UserAgent''': UserAgent().random}
def extract_user_profile(script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username: str ):
        '''simple docstring'''
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        '''simple docstring'''
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self: List[Any] ):
'''simple docstring'''
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: str ):
'''simple docstring'''
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        '''simple docstring'''
        return self.user_data["username"]
    @property
    def fullname( self ):
        '''simple docstring'''
        return self.user_data["full_name"]
    @property
    def biography( self ):
        '''simple docstring'''
        return self.user_data["biography"]
    @property
    def email( self ):
        '''simple docstring'''
        return self.user_data["business_email"]
    @property
    def website( self ):
        '''simple docstring'''
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        '''simple docstring'''
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        '''simple docstring'''
        return self.user_data["is_private"]
def _A ( _lowercase = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__UpperCamelCase = InstagramUser(_lowercase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowercase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        sas_model.load_state_dict(save_dict['model'] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
        wikiaab_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
        wikiaab_index_flat = faiss.IndexFlatIP(128 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    elia_train = elia['train_eli5']
    elia_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def _a ( lowerCAmelCase , lowerCAmelCase="wiki40b" , lowerCAmelCase="dense" , lowerCAmelCase=10 )-> Union[str, Any]:
if source == "none":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = query_qa_dense_index(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = query_es_index(
lowerCAmelCase , lowerCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
SCREAMING_SNAKE_CASE_ = 'question: {} context: {}'.format(lowerCAmelCase , lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCAmelCase : None),
} )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.9_5 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
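# Added note (commentary, not in the original app): the "dense" option scores
# passages by inner product between 128-d question and passage embeddings via
# the faiss IndexFlatIP built in load_indexes(), "sparse" queries the
# Elasticsearch index through query_es_index, and "mixed" merges and
# deduplicates both result lists before truncating to ten passages.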
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 6_4
max_len = 2_5_6
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
'''Maximum generation length''', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE: Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE: Optional[Any] = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE: Dict = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE: int = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=1_0)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=1_0)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:1_0]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 360 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , "has_pre_transformation" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , token_type_ids: Optional[torch.Tensor] = None , position_ids: Optional[torch.Tensor] = None , head_mask: Optional[torch.Tensor] = None , inputs_embeds: Optional[torch.Tensor] = None , encoder_hidden_states: Optional[torch.Tensor] = None , encoder_attention_mask: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 376 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase ):
    def _get_uniform_logits( self , batch_size: int , length: int ):
        """simple docstring"""
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        """simple docstring"""
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
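    # Added note (commentary, not in the original test): temperature T rescales
    # logits to z / T before the softmax, so T < 1 sharpens the distribution
    # (peaks grow, valleys shrink) and T > 1 flattens it, which is exactly what
    # the assertions above check on the non-uniform second batch row.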
    def test_top_k_dist_warper( self ):
        """simple docstring"""
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        """simple docstring"""
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 1_0_0.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        """simple docstring"""
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) , vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        """simple docstring"""
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        """simple docstring"""
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        """simple docstring"""
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        """simple docstring"""
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
        # with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 376 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 438 |
"""simple docstring"""
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
    names = names.replace("\"" , "" ).split("," )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 438 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl , wt , w , n ):
    r = sorted(zip(vl , wt ) , key=lambda x: x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
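# Hedged worked example (added for illustration): with vl=[60, 100, 120],
# wt=[10, 20, 30], w=50 and n=3, the greedy ratio order takes the first two
# items whole (value 160, weight 30) plus 20/30 of the third, so
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) should return
# 160 + 20 * 120 / 30 = 240.0.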
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = DetaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
        self.assertEqual(image_processor.do_pad , True )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        '''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 39_769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        '''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 647 | 1 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img , pt1 , pt2 , rows , cols ) -> np.ndarray:
    rot_mat = cv2.getAffineTransform(pt1 , pt2 )
    return cv2.warpAffine(img , rot_mat , (rows, cols) )
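# Added commentary (not in the original file): cv2.getAffineTransform solves for
# the unique 2x3 affine matrix mapping one triangle of points onto another, and
# cv2.warpAffine applies that matrix to every pixel, so each (pt1, pt2) pair
# below produces a different rotation/shear of the grayscale image.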
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
    img_rows, img_cols = gray_img.shape
# set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),  # point pairings chosen to give three distinct transforms
]
# plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 323 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if proj is None:
UpperCAmelCase = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase = nn.functional.linear(UpperCamelCase__ , proj.t().contiguous() )
UpperCAmelCase = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=False ) -> Optional[int]:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase = hidden[..., :-1, :].contiguous()
UpperCAmelCase = labels[..., 1:].contiguous()
UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCAmelCase = labels != -1_00
UpperCAmelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase = (
-nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase , UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase = self.out_layers[i].weight
UpperCAmelCase = self.out_layers[i].bias
if i == 0:
UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
if labels is None:
UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCAmelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase = 0
UpperCAmelCase = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
UpperCAmelCase , UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase = labels.index_select(0 , UpperCamelCase__ ) - l_idx
UpperCAmelCase = head_logprob.index_select(0 , UpperCamelCase__ )
UpperCAmelCase = hidden.index_select(0 , UpperCamelCase__ )
else:
UpperCAmelCase = hidden
if i == 0:
if labels is not None:
UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
if self.n_clusters == 0:
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase , UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase = self.out_layers[i].weight
UpperCAmelCase = self.out_layers[i].bias
if i == 0:
UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCAmelCase = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
UpperCAmelCase , UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase = logprob_i
return out
| 323 | 1 |
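The class above implements the Transformer-XL-style adaptive softmax by hand; PyTorch ships the same cutoff idea as nn.AdaptiveLogSoftmaxWithLoss. A minimal usage sketch with illustrative sizes, not tied to the class above:

import torch
from torch import nn

vocab_size, hidden_dim = 10000, 64
criterion = nn.AdaptiveLogSoftmaxWithLoss(hidden_dim, vocab_size, cutoffs=[100, 1000, 5000])
hidden = torch.randn(32, hidden_dim)           # (batch, hidden)
targets = torch.randint(0, vocab_size, (32,))  # (batch,)
result = criterion(hidden, targets)            # namedtuple with .output (per-example log-probs) and .loss
print(result.output.shape, result.loss.item())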
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 720 |
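The dummy-object pattern above exists so that importing the library without its optional backends still succeeds, while any actual use of a backend-dependent class fails with a clear error. A minimal, self-contained sketch of the idea; the names here are illustrative, not the real diffusers internals:

class DummyObject(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the torch and torchsde backends.")


class FakeScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        raise ImportError("FakeScheduler requires the torch and torchsde backends.")


try:
    FakeScheduler()
except ImportError as err:
    print(err)  # points the user at the missing backends instead of failing obscurely later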
from math import pow, sqrt


def validate(*values: float) -> bool:
    """Check that at least one value was given and all are strictly positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Rate ratio of gas 1 to gas 2 per Graham's law: r1 / r2 = sqrt(M2 / M1)."""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Effusion rate of gas 1 given the rate of gas 2 and both molar masses."""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Effusion rate of gas 2 given the rate of gas 1 and both molar masses."""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    """Molar mass of gas 1 given the molar mass of gas 2 and both effusion rates."""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    """Molar mass of gas 2 given the molar mass of gas 1 and both effusion rates."""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
| 380 | 0 |
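A quick worked example of Graham's law, r1 / r2 = sqrt(M2 / M1), using the functions above under the parameter convention restored there: hydrogen (M ~ 2.016 g/mol) effuses roughly four times faster than oxygen (M ~ 31.998 g/mol), since sqrt(31.998 / 2.016) ~ 3.98.

print(effusion_ratio(2.016, 31.998))            # ~3.98, rate ratio of gas 1 (H2) to gas 2 (O2)
print(first_effusion_rate(1.0, 2.016, 31.998))  # rate of gas 1 if gas 2 effuses at 1.0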
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def __A ( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str=None , **lowerCAmelCase : Any ):
'''simple docstring'''
UpperCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase_ = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __A ( self : str , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : int=None , **lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
UpperCAmelCase_ = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __A ( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Any=None , **lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
UpperCAmelCase_ = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = after_output[0]
UpperCAmelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-3 )
def __A ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=None , **lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
UpperCAmelCase_ = model(
input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , output_attentions=lowerCAmelCase )
UpperCAmelCase_ = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = to_atuple(vision_model.config.image_size )
UpperCAmelCase_ = to_atuple(vision_model.config.patch_size )
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase_ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase_ = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __A ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : int ):
'''simple docstring'''
pt_model.to(lowerCAmelCase )
pt_model.eval()
# prepare inputs
UpperCAmelCase_ = inputs_dict
UpperCAmelCase_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase_ = pt_model(**lowerCAmelCase ).to_tuple()
UpperCAmelCase_ = fx_model(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase , from_pt=lowerCAmelCase )
UpperCAmelCase_ = fx_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase , from_flax=lowerCAmelCase )
pt_model_loaded.to(lowerCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase_ = pt_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output_loaded.numpy() , 4e-2 )
def __A ( self : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = VisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase )
UpperCAmelCase_ = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __A ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = VisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase_ = load_flax_weights_in_pytorch_model(lowerCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase )
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase )
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase )
@is_pt_flax_cross_test
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = config_inputs_dict.pop("vision_config" )
UpperCAmelCase_ = config_inputs_dict.pop("text_config" )
UpperCAmelCase_ = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.check_equivalence_flax_to_pt(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.get_pretrained_model_and_inputs()
UpperCAmelCase_ = model_a(**lowerCAmelCase )
UpperCAmelCase_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = model_a(**lowerCAmelCase )
UpperCAmelCase_ = after_outputs[0]
UpperCAmelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase , text_from_pt=lowerCAmelCase , )
UpperCAmelCase_ = 13
UpperCAmelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase_ = random_attention_mask([batch_size, 4] )
UpperCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __A ( self : int , lowerCAmelCase : Any , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = FlaxViTModel(lowerCAmelCase )
UpperCAmelCase_ = FlaxBertModel(lowerCAmelCase )
return vision_model, text_model
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = FlaxBertModelTester(self )
UpperCAmelCase_ = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = vision_config_and_inputs
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase , text_from_pt=lowerCAmelCase , )
UpperCAmelCase_ = 13
UpperCAmelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase_ = random_attention_mask([batch_size, 4] )
UpperCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __A ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = FlaxCLIPVisionModel(lowerCAmelCase )
UpperCAmelCase_ = FlaxBertModel(lowerCAmelCase )
return vision_model, text_model
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = FlaxCLIPVisionModelTester(self )
UpperCAmelCase_ = FlaxBertModelTester(self )
UpperCAmelCase_ = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = vision_config_and_inputs
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
UpperCAmelCase_ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase_ = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase , padding=lowerCAmelCase , return_tensors="np" )
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 162 |
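The quantity these tests assert on, sketched directly: a dual encoder scores every image against every text with a scaled dot product of L2-normalized embeddings. Shapes and the scale value below are illustrative:

import numpy as np

image_embeds = np.random.randn(2, 512)  # (num_images, projection_dim)
text_embeds = np.random.randn(3, 512)   # (num_texts, projection_dim)
image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)
logit_scale = 1.0
logits_per_text = logit_scale * text_embeds @ image_embeds.T  # (num_texts, num_images)
logits_per_image = logits_per_text.T                          # (num_images, num_texts)
print(logits_per_image.shape, logits_per_text.shape)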
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 162 | 1 |
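A usage sketch of the add_prefix_space guard above: with a byte-level pre-tokenizer, pretokenized input (is_split_into_words=True) only round-trips correctly when each word gets a leading space, so the fast tokenizer refuses it otherwise. Fetching the checkpoint requires network access:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")  # default add_prefix_space=False
try:
    tok(["Hello", "world"], is_split_into_words=True)
except Exception as err:
    print(err)  # asks you to instantiate the tokenizer with add_prefix_space=True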
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 557 |
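The lazy-import machinery above, reduced to its core idea: a module-level __getattr__ (PEP 562) resolves exported names on first access instead of importing every backend at import time. A minimal sketch; _LazyModule itself adds caching, dir() support, and error handling on top of this:

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # module name -> exported names


def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")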
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 557 | 1 |
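How the expected tokens in the test arise: BPE repeatedly applies the highest-priority (lowest-ranked) merge among adjacent symbol pairs. A minimal sketch of that loop, run against the test's own merge table:

def bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            return symbols
        best = min(candidates, key=ranks.get)
        i = pairs.index(best)  # merges the first occurrence only; enough for this demo
        symbols = symbols[:i] + [best[0] + best[1]] + symbols[i + 2 :]


merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
print(bpe("adapt", merges))  # ['adapt</w>']             -> token "adapt"
print(bpe("react", merges))  # ['re', 'a', 'c', 't</w>'] -> "re@@ a@@ c@@ t"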
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    bounds = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 607 |
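For contrast with the deterministic-bases test above, the classic randomized Miller-Rabin uses k random bases and accepts a false-positive probability of at most 4**-k for composite n. A minimal sketch:

import random


def is_probable_prime(n: int, k: int = 20) -> bool:
    if n < 2:
        return False
    for p in (2, 3, 5, 7):
        if n % p == 0:
            return n == p
    d, s = n - 1, 0
    while d % 2 == 0:  # write n - 1 as d * 2**s with d odd
        d //= 2
        s += 1
    for _ in range(k):
        a = random.randrange(2, n - 1)
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False  # a is a witness: n is definitely composite
    return True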
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 607 | 1 |
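The text_target mechanism these tests exercise, in one self-contained call: passing text_target makes the tokenizer encode the targets in the same pass and return them under "labels". The checkpoint name is just an example and fetching it requires network access:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/bart-base")
batch = tok(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)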
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
| 712 |
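Both functions above enumerate the same set of permutations, so the standard library is the easy cross-check (using the function names restored above):

from itertools import permutations

nums = [1, 2, 3]
expected = {tuple(p) for p in permutations(nums)}
assert {tuple(p) for p in permute(nums)} == expected
assert {tuple(p) for p in permute2(nums)} == expected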
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 140 | 0 |
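A minimal sketch of the kind of check assert_mean_pixel_difference performs in the slow test above: compare two uint8 images by mean absolute pixel error against a loose threshold. The threshold value here is illustrative, not the library's:

import numpy as np


def mean_pixel_difference(image: np.ndarray, expected: np.ndarray, threshold: float = 10.0) -> None:
    image = image.astype(np.float32)
    expected = expected.astype(np.float32)
    avg_diff = np.abs(image - expected).mean()
    assert avg_diff < threshold, f"Images too different, mean pixel error {avg_diff:.2f}"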
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
 | 67 |
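# Example invocation for the conversion script above (illustrative only; the
# script file name is an assumption and the paths are placeholders):
#
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small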
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
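# Quick sanity checks for solution() (added for illustration):
#   solution(5)    -> 101        (1 + 3+5+7+9 + 13+17+21+25)
#   solution(1001) -> 669171001  (the Project Euler problem 28 answer)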
| 195 | 0 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths in ``nums`` satisfy the polygon inequality."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
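# Illustrative usage of check_polygon (values chosen arbitrarily):
#   check_polygon([6, 10, 5])     -> True   (10 < 6 + 5)
#   check_polygon([3, 7, 13, 2])  -> False  (13 >= 3 + 7 + 2)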
| 713 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48000,
'sample_size': 131072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert the (alpha, sigma) noise parameterisation to a continuous timestep."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """The "crash" noise schedule used by the original sampler."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
main(args)
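# Example invocation (illustrative only; the script file name is an assumption
# and the output path is a placeholder):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers
#
# Passing one of the official model names above triggers a download; otherwise
# --model_path should point at a local .ckpt file.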
| 76 | 0 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
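# Illustrative usage of check_cycle (graphs chosen arbitrarily):
#   check_cycle({0: [1], 1: [2], 2: [0]})  -> True   (0 -> 1 -> 2 -> 0 is a cycle)
#   check_cycle({0: [1], 1: [2], 2: []})   -> False  (the graph is acyclic)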
| 84 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""")
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""")
elif mobility < 0:
raise ValueError("""mobility cannot be negative""")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
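# Illustrative usage of carrier_concentration (numbers chosen arbitrarily):
# pass 0 for exactly one argument and the function solves for that quantity.
#   carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
#     -> ('mobility', 25 / (100 * ELECTRON_CHARGE))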
| 136 | 0 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Brute-force a Caesar cipher, ranking shifts by chi-squared letter-frequency fit."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.08_497,
"""b""": 0.01_492,
"""c""": 0.02_202,
"""d""": 0.04_253,
"""e""": 0.11_162,
"""f""": 0.02_228,
"""g""": 0.02_015,
"""h""": 0.06_094,
"""i""": 0.07_546,
"""j""": 0.00_153,
"""k""": 0.01_292,
"""l""": 0.04_025,
"""m""": 0.02_406,
"""n""": 0.06_749,
"""o""": 0.07_507,
"""p""": 0.01_929,
"""q""": 0.00_095,
"""r""": 0.07_587,
"""s""": 0.06_327,
"""t""": 0.09_356,
"""u""": 0.02_758,
"""v""": 0.00_978,
"""w""": 0.02_560,
"""x""": 0.00_150,
"""y""": 0.01_994,
"""z""": 0.00_077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
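# Illustrative usage (the ciphertext is an arbitrary example; for ordinary
# English text the chi-squared ranking should recover the correct shift):
#   decrypt_caesar_with_chi_squared('dro aesmu lbygx pyh')
#     -> (10, <chi-squared score>, 'the quick brown fox')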
| 529 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule from an alpha_bar (cumulative product of 1 - beta) function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
@property
    def state_in_first_order(self):
return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
return self.config.num_train_timesteps
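# Minimal denoising-loop sketch for the scheduler above (illustrative only;
# `model` stands in for a hypothetical UNet-style noise predictor and the
# sample shape is arbitrary):
#
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample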
| 529 | 1 |
"""simple docstring"""
def upper(word: str) -> str:
    """Convert ASCII lowercase letters in ``word`` to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
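# Illustrative usage: upper("hello WORLD 123") -> "HELLO WORLD 123"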
 | 95 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
 | 564 | 0 |
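# Usage note for the lazy-module pattern above (illustrative): an import like
#   from transformers.models.convnext import ConvNextConfig
# stays cheap, while a torch-backed symbol such as ConvNextModel only triggers
# the corresponding submodule import on first attribute access.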
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
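# Example launch commands (illustrative only; assumes this example is saved as
# gradient_accumulation.py and that `accelerate config` has been run):
#
#   python gradient_accumulation.py --gradient_accumulation_steps 2
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16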
| 703 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
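# Example invocation (illustrative only; the script file name is an assumption
# and all paths are placeholders):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path ./yoso.ckpt \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-hf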
| 565 | 0 |