code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Any ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=True ):
model.train()
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = F.mse_loss(__lowerCamelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=False ):
set_seed(42 )
__UpperCAmelCase : Optional[int] = RegressionModel()
__UpperCAmelCase : Tuple = deepcopy(__lowerCamelCase )
__UpperCAmelCase : Tuple = RegressionDataset(length=80 )
__UpperCAmelCase : Dict = DataLoader(__lowerCamelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
__UpperCAmelCase : Optional[Any] = AdamW(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__UpperCAmelCase : Any = LambdaLR(__lowerCamelCase , lr_lambda=lambda __lowerCamelCase : epoch**0.6_5 )
__UpperCAmelCase : str = LambdaLR(__lowerCamelCase , lr_lambda=lambda __lowerCamelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
__UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.prepare(__lowerCamelCase , __lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
# Test when on a single CPU or GPU that the context manager does nothing
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = get_training_setup(__lowerCamelCase )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__UpperCAmelCase : Optional[Any] = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def lowerCamelCase__ ( __lowerCamelCase : Any ):
# Test on distributed setup that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = get_training_setup(__lowerCamelCase )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__UpperCAmelCase : str = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def lowerCamelCase__ ( __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=False ):
__UpperCAmelCase : Tuple = Accelerator(
split_batches=__lowerCamelCase , dispatch_batches=__lowerCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = get_training_setup(__lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__UpperCAmelCase : Dict = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
GradientState._reset_state()
def lowerCamelCase__ ( __lowerCamelCase : List[str]=False , __lowerCamelCase : Union[str, Any]=False ):
__UpperCAmelCase : Optional[int] = Accelerator(
split_batches=__lowerCamelCase , dispatch_batches=__lowerCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_training_setup(__lowerCamelCase , __lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : str = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
__UpperCAmelCase : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def lowerCamelCase__ ( ):
__UpperCAmelCase : List[str] = Accelerator()
__UpperCAmelCase : Optional[Any] = RegressionDataset(length=80 )
__UpperCAmelCase : Any = DataLoader(__lowerCamelCase , batch_size=16 )
__UpperCAmelCase : Optional[Any] = RegressionDataset(length=96 )
__UpperCAmelCase : Union[str, Any] = DataLoader(__lowerCamelCase , batch_size=16 )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = accelerator.prepare(__lowerCamelCase , __lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if iteration < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if batch_num < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCamelCase__ ( ):
__UpperCAmelCase : int = Accelerator()
__UpperCAmelCase : Dict = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(__lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(__lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(__lowerCamelCase , __lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[int] = 'git_vision_model'
def __init__( self : str , __lowercase : List[str]=768 , __lowercase : List[str]=3072 , __lowercase : List[Any]=12 , __lowercase : Dict=12 , __lowercase : int=3 , __lowercase : Any=224 , __lowercase : Optional[int]=16 , __lowercase : Dict="quick_gelu" , __lowercase : Any=1e-5 , __lowercase : str=0.0 , __lowercase : int=0.02 , **__lowercase : int , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : Union[str, os.PathLike] , **__lowercase : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__UpperCAmelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'git'
def __init__( self : Optional[int] , __lowercase : List[Any]=None , __lowercase : Tuple=30522 , __lowercase : str=768 , __lowercase : Optional[int]=6 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[int]=3072 , __lowercase : List[str]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=1024 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[Any]=1e-1_2 , __lowercase : List[Any]=0 , __lowercase : Dict="absolute" , __lowercase : Dict=True , __lowercase : Any=False , __lowercase : Optional[int]=101 , __lowercase : str=102 , __lowercase : Union[str, Any]=None , **__lowercase : Dict , ) -> Tuple:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , pad_token_id=__lowercase , **__lowercase )
if vision_config is None:
__UpperCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__UpperCAmelCase : Tuple = GitVisionConfig(**__lowercase )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : int = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Optional[int] = bos_token_id
__UpperCAmelCase : List[Any] = eos_token_id
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : List[str] = self.vision_config.to_dict()
__UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 63 | 1 |
from ..utils import DummyObject, requires_backends
class a ( metaclass=lowercase__ ):
"""simple docstring"""
a : List[str] = ['keras_nlp']
def __init__( self : Optional[int] , *__lowercase : List[str] , **__lowercase : Dict ) -> List[str]:
requires_backends(self , ["""keras_nlp"""] )
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
def lowerCamelCase__ ( __lowerCamelCase : float , __lowerCamelCase : int ):
if digit_amount > 0:
return round(number - int(__lowerCamelCase ) , __lowerCamelCase )
return number - int(__lowerCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 63 |
from __future__ import annotations
import math
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool , __lowerCamelCase : list[int] , __lowerCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = [90, 23, 6, 33, 21, 65, 123, 34423]
__UpperCAmelCase : str = math.log(len(__lowerCamelCase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Dict = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
a : List[str] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a : Dict = None
a : int = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : Dict = 45
a : List[Any] = 1_581
a : Tuple = 1_517
a : Dict = 1_570
a : Any = 1_584
a : Optional[Any] = 1_793
a : Union[str, Any] = 1_795
a : Tuple = 1_916
a : Any = 1_864
a : Dict = 1_905
a : List[str] = 1_919
a : List[Any] = 2_429
a : Tuple = 2_208
a : Union[str, Any] = 2_418
a : Any = 2_323
a : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( PretrainedConfig ):
    """Configuration for an OpenAI GPT style model.

    Stores the hyper-parameters consumed by the model and inherits
    serialization / hub-loading behaviour from ``PretrainedConfig``
    (imported at the top of this file; the degraded base name
    ``lowercase__`` was undefined here).

    Fixes: the original ``__init__`` reused the name ``__lowercase`` for all
    sixteen parameters, which is a SyntaxError (duplicate argument); the body
    already used the canonical names restored below.
    """

    # Model identifier and the mapping from canonical config attribute names
    # to this config's field names (``PretrainedConfig`` conventions).
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the hyper-parameters and forward remaining kwargs to the base class.

        Defaults (40478 vocab, 512 positions, 768 hidden, 12 layers/heads)
        are taken from the original degraded signature.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 63 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast, CPU-sized smoke tests for the Kandinsky 2.2 inpainting pipeline.

    NOTE(review): the obfuscation renamed every class attribute to ``a`` (so
    only the last binding survives), every helper/property to
    ``UpperCAmelCase``, and several locals to ``__UpperCAmelCase`` while
    later lines still read the intended names (``self.time_input_dim``,
    ``self.dummy_unet``, ``model``, ``components`` ...). Restore the
    original identifiers before running.
    """

    # Pipeline class under test.
    a : int = KandinskyVaaInpaintPipeline
    # Parameter lists consumed by the common PipelineTesterMixin machinery.
    a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    a : Any = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    a : Any = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    a : List[Any] = False

    @property
    def UpperCAmelCase ( self : int ) -> Dict:
        # Hidden size used to shape the dummy image embeddings below.
        return 32

    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        # Time-embedding input dimension.
        return 32

    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # Cross-attention dim mirrors the time input dim.
        return self.time_input_dim

    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        # Flip (expanded) dimension of the time embedding.
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Number of training timesteps for the dummy scheduler.
        return 100

    @property
    def UpperCAmelCase ( self : Dict ) -> Any:
        # Build a tiny UNet with a fixed seed so weights are reproducible.
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        # NOTE(review): `__lowercase` is unbound here (degraded name); the
        # kwargs dict built above was presumably the intended argument.
        __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
        return model

    @property
    def UpperCAmelCase ( self : int ) -> int:
        # Constructor kwargs for the tiny VQ decoder ("movq") below.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        # Seeded dummy VQ model used as the pipeline's image decoder.
        torch.manual_seed(0 )
        __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def UpperCAmelCase ( self : Any ) -> List[Any]:
        # Assemble the pipeline components: dummy UNet + DDIM scheduler + movq.
        __UpperCAmelCase : List[str] = self.dummy_unet
        __UpperCAmelCase : List[str] = self.dummy_movq
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
        __UpperCAmelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
        # Build deterministic pipeline inputs for a given device/seed.
        # NOTE(review): the duplicated `__lowercase` parameter names are a
        # SyntaxError; from the body the signature was (device, seed=0).
        __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowercase )
        # create init_image
        __UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
        __UpperCAmelCase : List[str] = 0
        if str(__lowercase ).startswith("""mps""" ):
            __UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
        else:
            __UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __UpperCAmelCase : Optional[Any] = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # End-to-end CPU smoke test: run the pipeline via both the dict and
        # tuple return paths and compare a 3x3 corner slice to a reference.
        __UpperCAmelCase : Optional[Any] = """cpu"""
        __UpperCAmelCase : Dict = self.get_dummy_components()
        __UpperCAmelCase : str = self.pipeline_class(**__lowercase )
        __UpperCAmelCase : Tuple = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
        __UpperCAmelCase : Tuple = output.images
        __UpperCAmelCase : Optional[int] = pipe(
            **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
        __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase : Optional[Any] = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Delegate to the shared mixin's batch-consistency check with a
        # slightly relaxed tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow GPU integration test against the published Kandinsky 2.2 weights.

    NOTE(review): locals are degraded to ``__UpperCAmelCase`` while later
    lines read the intended names (``pipe_prior``, ``pipeline`` ...), and
    most call arguments were collapsed to ``__lowercase``.
    """

    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Reference output produced by a known-good fp16 run of the pipeline.
        __UpperCAmelCase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __UpperCAmelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Inpainting mask over the source image.
        # NOTE(review): the next assignment's target was lost in degradation —
        # it presumably zeroed a region of `mask`; confirm against upstream.
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
        __UpperCAmelCase : Optional[Any] = 0
        __UpperCAmelCase : Tuple = """a hat"""
        # Prior pipeline turns the text prompt into image embeddings.
        __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __UpperCAmelCase : int = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
            __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __UpperCAmelCase : Optional[int] = pipeline(
            image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __UpperCAmelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        # Compare mean pixel difference against the stored reference image.
        assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( number_of_bytes : int , partitions : int ):
    """Split *number_of_bytes* into *partitions* contiguous 1-indexed ranges.

    Fixes a degradation artifact: the original signature named both
    parameters ``__lowerCamelCase`` (a duplicate-argument SyntaxError) while
    the body already used ``number_of_bytes`` and ``partitions``.

    Args:
        number_of_bytes: total number of bytes to allocate (1-indexed ranges).
        partitions: how many contiguous ranges to produce.

    Returns:
        A list of ``"start-end"`` strings; the last partition absorbs any
        division remainder so the ranges exactly cover ``1..number_of_bytes``.

    Raises:
        ValueError: if ``partitions`` is non-positive or exceeds
            ``number_of_bytes``.
    """
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        # The final partition runs to the very end to absorb the remainder.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
    return allocation_list
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Feature detection: record whether torch is importable.
# NOTE(review): the obfuscation renamed every module constant to `a`, so the
# intended names (`_torch_available`, `torch_cache_home`, `PATH`, ... ) used
# later in this file no longer resolve to these bindings.
try:
    import torch
    a : List[Any] = True
except ImportError:
    a : str = False
# Resolve the torch cache home, falling back to the XDG convention when the
# private torch.hub helper is unavailable.
try:
    from torch.hub import _get_torch_home
    a : List[Any] = _get_torch_home()
except ImportError:
    a : int = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
# Default cache directory and the remote endpoints weights are fetched from.
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory containing this file, plus the bundled config/label resources.
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
# Environment-variable overrides for the cache location, in precedence order.
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
# Default weight / config filenames.
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( objs=OBJECTS , attrs=ATTRIBUTES ):
    """Read the Visual-Genome object-class and attribute label files.

    Fixes a degradation artifact: the original signature named both
    parameters ``__lowerCamelCase`` (a duplicate-argument SyntaxError).

    Each file is expected to hold one label per line, optionally followed by
    a comma and extra columns; only the first column is kept, lower-cased
    and stripped.

    Args:
        objs: path to the object-class file (defaults to module ``OBJECTS``).
        attrs: path to the attribute file (defaults to module ``ATTRIBUTES``).

    Returns:
        Tuple ``(vg_classes, vg_attrs)`` of label-name lists.
    """
    with open(objs ) as f:
        vg_classes = [line.split("," )[0].lower().strip() for line in f.readlines()]
    with open(attrs ) as f:
        vg_attrs = [line.split("," )[0].lower().strip() for line in f.readlines()]
    return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
class a :
    """Nested attribute-style configuration container.

    Wraps a (possibly nested) dict so values can be read and written with
    dotted attribute access; nested dicts become child Config instances one
    level deeper. NOTE(review): the obfuscation renamed locals/attributes to
    ``__UpperCAmelCase`` (while later lines read the intended names such as
    ``self._name``/``self._pointer``), collapsed several distinct methods to
    the single name ``UpperCAmelCase`` (so only the last of each group
    survives), and duplicated ``__lowercase`` parameter names (SyntaxErrors).
    """

    # Class-level default (degraded name; presumably the pointer dict).
    a : Dict = {}

    def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
        # NOTE(review): duplicated parameter names — originally
        # (dictionary, name="root", level=0), per the body's references.
        __UpperCAmelCase : List[str] = name
        __UpperCAmelCase : str = level
        __UpperCAmelCase : int = {}
        for k, v in dictionary.items():
            if v is None:
                # None values are not representable in this container.
                raise ValueError()
            __UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
            __UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
            if isinstance(__lowercase , __lowercase ):
                # Nested dicts become child Config objects one level deeper.
                __UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
            __UpperCAmelCase : Union[str, Any] = v
            setattr(self , __lowercase , __lowercase )
        __UpperCAmelCase : Any = d

    def __repr__( self : Optional[Any] ) -> Optional[int]:
        # Show the top-level keys of the wrapped dict.
        return str(list((self._pointer.keys()) ) )

    def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
        # Mirror the assignment into both the attribute dict and the nested
        # `_pointer` tree, descending through dotted keys.
        # NOTE(review): duplicated parameter names — originally (key, val).
        __UpperCAmelCase : int = val
        __UpperCAmelCase : List[str] = val
        __UpperCAmelCase : Union[str, Any] = key.split(""".""" )
        __UpperCAmelCase : List[Any] = len(__lowercase ) - 1
        __UpperCAmelCase : List[Any] = self._pointer
        if len(__lowercase ) > 1:
            for i, l in enumerate(__lowercase ):
                if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
                    # Delegate the remaining dotted path to the child Config.
                    setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
                if l == last_level:
                    __UpperCAmelCase : Union[str, Any] = val
                else:
                    __UpperCAmelCase : Union[str, Any] = pointer[l]

    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        # Accessor for the underlying dict.
        return self._pointer

    def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
        # Serialize to YAML at `file_name` (duplicated params — SyntaxError;
        # originally (data, file_name)).
        with open(f"""{file_name}""" , """w""" ) as stream:
            dump(__lowercase , __lowercase )

    def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
        # Serialize to JSON at `file_name` (duplicated params — SyntaxError).
        with open(f"""{file_name}""" , """w""" ) as stream:
            json.dump(__lowercase , __lowercase )

    @staticmethod
    def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
        # Parse a YAML file and return its contents.
        with open(__lowercase ) as stream:
            __UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
        return data

    def __str__( self : List[str] ) -> Tuple:
        # Pretty-print the nested tree, indenting by nesting level.
        __UpperCAmelCase : Any = """ """
        if self._name != "root":
            __UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            __UpperCAmelCase : List[Any] = """"""
        __UpperCAmelCase : Optional[Any] = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(__lowercase , __lowercase ):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
        __UpperCAmelCase : int = level
        return r[:-1]

    @classmethod
    def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
        # Build a Config from a pretrained model name/path (from_pretrained).
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
        return cls(__lowercase )

    @classmethod
    def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
        # Resolve the config file locally, from a URL, or via the hub cache,
        # then parse it as YAML. Returns (parsed config, remaining kwargs).
        __UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
        __UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
        __UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
        __UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
        if os.path.isdir(__lowercase ):
            __UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
        elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
            __UpperCAmelCase : Tuple = pretrained_model_name_or_path
        else:
            __UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
        try:
            # Load from URL or cache if already cached
            __UpperCAmelCase : Optional[int] = cached_path(
                __lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
        except EnvironmentError:
            __UpperCAmelCase : str = """Can't load config for"""
            raise EnvironmentError(__lowercase )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
    """Debug helper: compare *in_tensor* against the tensor saved in ``dump.pt``.

    Prints corner values of both arrays, asserts element-wise closeness
    (rtol=0.01, atol=0.1) with a percentage-mismatch message, and then raises
    unconditionally so the calling run halts once the comparison succeeded.
    NOTE(review): several assignment targets were degraded to
    ``__UpperCAmelCase`` — the ``out_tensor``/``na`` bindings the later lines
    read no longer line up with these assignments.
    """
    __UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
    __UpperCAmelCase : Tuple = in_tensor.numpy()
    __UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(na.shape , na[0, 0, :5] )
    assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    # Deliberate: abort the run after a successful comparison.
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( model_id , filename , use_cdn=True ):
    """Build the download URL for a file belonging to a hub model.

    Fixes two degradation artifacts: the duplicated ``__lowerCamelCase``
    parameter names (a duplicate-argument SyntaxError) and the ``(unknown)``
    placeholder that replaced the ``{filename}`` interpolation in both
    return strings (the second parameter was otherwise unused).

    Args:
        model_id: model identifier; ids without a "/" use the legacy flat
            ``{model_id}-{filename}`` layout, namespaced ids use
            ``{model_id}/{filename}``.
        filename: name of the file to fetch (e.g. ``pytorch_model.bin``).
        use_cdn: choose the CDN endpoint over the raw S3 bucket.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( url , temp_file , proxies=None , resume_size=0 , user_agent=None ):
    """Stream *url* into *temp_file* with a progress bar, supporting resume.

    Fixes degradation artifacts: the five parameters all shared the name
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError), and the
    ``ua``/``headers`` assignment targets were lost while later lines still
    read those names (NameError). Relies on the module-level
    ``_torch_available`` flag set at import time.

    Args:
        url: remote file to download.
        temp_file: writable binary file object receiving the content.
        proxies: optional proxies mapping forwarded to ``requests``.
        resume_size: number of bytes already downloaded; sent as a Range
            header so the server continues from that offset.
        user_agent: extra user-agent info (dict or str) appended to the UA.
    """
    ua = """python/{}""".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("""{}/{}""".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        headers["""Range"""] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="""B""" , unit_scale=True , total=total , initial=resume_size , desc="""Downloading""" , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
    """Download *url* into the local cache (or reuse a cached copy) and return its path.

    Flow: probe the server for an ETag, derive a cache filename from
    (url, etag), serve the cached file when present, otherwise download
    under a file lock (optionally resuming a partial ``.incomplete`` file)
    and write a ``.json`` sidecar with the url/etag metadata.

    NOTE(review): the repeated ``__lowerCamelCase`` parameter names are a
    duplicate-argument SyntaxError; from the body's references the intended
    signature is (url, cache_dir=None, force_download=False, proxies=None,
    etag_timeout=10, resume_download=False, user_agent=None,
    local_files_only=False). Most call arguments and assignment targets
    below were likewise collapsed to throwaway names.
    """
    if cache_dir is None:
        __UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        __UpperCAmelCase : List[str] = str(__lowerCamelCase )
    os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
    __UpperCAmelCase : List[Any] = None
    if not local_files_only:
        # Probe the remote for an ETag; any network failure leaves etag None.
        try:
            __UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
            if response.status_code == 200:
                __UpperCAmelCase : Dict = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    __UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
    # get cache path to put the file
    __UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(__lowerCamelCase ):
            return cache_path
        else:
            # Fall back to the most recent cached variant for this url.
            __UpperCAmelCase : List[Any] = [
                file
                for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(__lowerCamelCase ) > 0:
                return os.path.join(__lowerCamelCase , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(__lowerCamelCase ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    __UpperCAmelCase : str = cache_path + """.lock"""
    with FileLock(__lowerCamelCase ):
        # If the download just completed while the lock was activated.
        if os.path.exists(__lowerCamelCase ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            # Append to a persistent ".incomplete" file so an interrupted
            # download can be continued later via the Range header.
            __UpperCAmelCase : int = cache_path + """.incomplete"""
            @contextmanager
            def _resumable_file_manager():
                with open(__lowerCamelCase , """a+b""" ) as f:
                    yield f
            __UpperCAmelCase : str = _resumable_file_manager
            if os.path.exists(__lowerCamelCase ):
                __UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
            else:
                __UpperCAmelCase : List[Any] = 0
        else:
            __UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
            __UpperCAmelCase : Optional[int] = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
            http_get(
                __lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
        os.replace(temp_file.name , __lowerCamelCase )
        # Record url/etag metadata next to the cached file.
        __UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
        __UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
        with open(__lowerCamelCase , """w""" ) as meta_file:
            json.dump(__lowerCamelCase , __lowerCamelCase )
    return cache_path
def lowerCamelCase__ ( url , etag=None ):
    """Map a URL (and optional ETag) to a deterministic cache filename.

    The name is ``sha256(url)`` hex, with ``"." + sha256(etag)`` appended
    when an ETag is given, and the ``.h5`` extension preserved so h5py can
    still recognise cached HDF5 files.

    Fixes a degradation artifact: both parameters were named
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError). ``shaaaa`` is
    this module's (degraded) alias for ``hashlib.sha256``.
    """
    url_bytes = url.encode("""utf-8""" )
    url_hash = shaaaa(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("""utf-8""" )
        etag_hash = shaaaa(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(""".h5""" ):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
    """Resolve a URL-or-path to a concrete local file, optionally extracting archives.

    Remote URLs are fetched through ``get_from_cache``; existing local files
    are returned as-is; anything else raises. With extraction enabled,
    zip/tar archives are unpacked (under a file lock) into a sibling
    ``*-extracted`` directory whose path is returned.

    NOTE(review): the repeated ``__lowerCamelCase`` parameter names are a
    duplicate-argument SyntaxError; from the body the intended signature is
    (url_or_filename, cache_dir=None, force_download=False, proxies=None,
    resume_download=False, user_agent=None, extract_compressed_file=False,
    force_extract=False, local_files_only=False). Call arguments and
    assignment targets below were likewise collapsed to throwaway names.
    """
    if cache_dir is None:
        __UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        __UpperCAmelCase : Any = str(__lowerCamelCase )
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        __UpperCAmelCase : Tuple = str(__lowerCamelCase )
    if is_remote_url(__lowerCamelCase ):
        # URL, so get it from the cache (downloading if necessary)
        __UpperCAmelCase : Tuple = get_from_cache(
            __lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
    elif os.path.exists(__lowerCamelCase ):
        # File, and it exists.
        __UpperCAmelCase : Tuple = url_or_filename
    elif urlparse(__lowerCamelCase ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
    if extract_compressed_file:
        if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        __UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
        __UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
        __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
        if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
            # Already extracted and not forced: reuse the existing directory.
            return output_path_extracted
        # Prevent parallel extractions
        __UpperCAmelCase : str = output_path + """.lock"""
        with FileLock(__lowerCamelCase ):
            shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
            os.makedirs(__lowerCamelCase )
            if is_zipfile(__lowerCamelCase ):
                with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
                    zip_file.extractall(__lowerCamelCase )
                    zip_file.close()
            elif tarfile.is_tarfile(__lowerCamelCase ):
                __UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
                tar_file.extractall(__lowerCamelCase )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
        return output_path_extracted
    return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = requests.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
    """Fetch an image over HTTP and decode it into a numpy array via PIL.

    NOTE(review): the ``response``/``img`` assignment targets were degraded
    to ``__UpperCAmelCase``, so the names read on the following lines are
    unbound as written.
    """
    __UpperCAmelCase : Optional[int] = requests.get(__lowerCamelCase )
    __UpperCAmelCase : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Download (if absent) a pickled checkpoint from *url* and convert its
    "model" dict to torch tensors.

    Re-keys BatchNorm statistics: each ``running_var`` entry additionally
    produces a ``num_batches_tracked`` tensor (initialised to 0), matching
    what torch's BatchNorm state dicts expect.
    NOTE(review): the ``fn``/``weights``/``model``/``new``/``zero``
    assignment targets were degraded to throwaway names, so the names read
    on subsequent lines are unbound as written.
    """
    __UpperCAmelCase : int = url.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        # Only download when the file is not already in the working directory.
        wget.download(__lowerCamelCase )
    with open(__lowerCamelCase , """rb""" ) as stream:
        __UpperCAmelCase : List[str] = pkl.load(__lowerCamelCase )
    __UpperCAmelCase : Dict = weights.pop("""model""" )
    __UpperCAmelCase : Union[str, Any] = {}
    for k, v in model.items():
        __UpperCAmelCase : int = torch.from_numpy(__lowerCamelCase )
        if "running_var" in k:
            __UpperCAmelCase : Optional[int] = torch.tensor([0] )
            __UpperCAmelCase : Tuple = k.replace("""running_var""" , """num_batches_tracked""" )
            __UpperCAmelCase : Any = zero
    return new
def lowerCamelCase__ ( ):
    """Print the absolute path of the demo notebook shipped next to this package.

    BUG FIX: the original interpolated the name ``__lowerCamelCase``, which
    is unbound inside a zero-argument function (NameError on every call).
    ``PATH`` — this file's directory, defined with the module constants
    above — is the only sensible anchor; confirm against upstream.
    """
    print(f"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( im , input_format="RGB" ):
    """Load an image from a local path or URL into a numpy array.

    The image is read with OpenCV (BGR), converted to RGB, and — when
    ``input_format`` is ``"RGB"`` — has its channel axis reversed again,
    exactly as the original body did.

    Fixes a degradation artifact: both parameters were named
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError); the body
    already referenced ``input_format`` and (in the f-string) ``im``.
    """
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cva.imread(im )
    else:
        # Not a local file: treat the string as a URL.
        img = get_image_from_url(im )
        assert img is not None, f"""could not connect to: {im}"""
    img = cva.cvtColor(img , cva.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__ ( images , batch=1 ):
    """Yield successive *batch*-sized slices of *images* as a generator.

    Fixes a degradation artifact: both parameters were named
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError); the body
    already used ``images`` and ``batch``.
    """
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 63 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A weighted directed edge of the 0/1-BFS graph below.

    Restores the identifiers destroyed by degradation: the class was renamed
    to ``a`` with both fields named ``a``, yet the adjacency-list class in
    this file constructs ``Edge(...)`` and reads ``edge.destination_vertex``
    and ``edge.weight`` — so these are the only names that can work.
    """

    # Index of the vertex this edge points to.
    destination_vertex: int
    # Edge weight; restricted to 0 or 1 by the graph's add_edge validation.
    weight: int
class a :
    """Adjacency-list digraph with 0/1 edge weights and deque-based 0/1-BFS.

    Restores the identifiers destroyed by degradation: all three methods
    were named ``UpperCAmelCase`` (so they shadowed one another), the
    property had to be ``size`` because ``add_edge`` reads ``self.size``,
    and the duplicated ``__lowercase`` parameter names were SyntaxErrors.
    """

    def __init__(self, size: int) -> None:
        """Create a graph with *size* vertices and no edges."""
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving *vertex*."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        # Number of vertices; read by add_edge's bounds check.
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        """Insert a directed edge; *weight* must be 0 or 1.

        Raises:
            ValueError: on a weight other than 0/1 or an out-of-range target.
        """
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """Return the 0/1-BFS shortest distance between the two vertices.

        Zero-weight edges push their destination to the *front* of the
        deque, weight-one edges to the back, giving Dijkstra-equivalent
        results in O(V + E) for 0/1 weights.

        Raises:
            ValueError: when finish_vertex is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip when an equal-or-better distance is already recorded.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
if __name__ == "__main__":
    # Run any doctests in this module when it is executed directly.
    import doctest

    doctest.testmod()
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
    """Builds tiny RoBERTa configs and inputs for the Flax model tests.

    NOTE(review): identifier mangling left this class non-runnable:
    ``__init__`` declares the parameter ``__lowercase`` twenty times (a
    SyntaxError in real Python), every local is assigned to
    ``__UpperCAmelCase`` while the following lines read the original names,
    three methods share the name ``UpperCAmelCase`` (later defs shadow
    earlier ones), and ``__lowercase`` inside the config builder is
    undefined. The comments below describe the apparent intent only.
    """

    def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
        # Intended: store the parent test case plus the model hyper-parameters.
        __UpperCAmelCase : Dict = parent
        __UpperCAmelCase : Any = batch_size
        __UpperCAmelCase : str = seq_length
        __UpperCAmelCase : Tuple = is_training
        __UpperCAmelCase : List[Any] = use_attention_mask
        __UpperCAmelCase : Dict = use_token_type_ids
        __UpperCAmelCase : Optional[int] = use_labels
        __UpperCAmelCase : Tuple = vocab_size
        __UpperCAmelCase : List[Any] = hidden_size
        __UpperCAmelCase : Optional[Any] = num_hidden_layers
        __UpperCAmelCase : Any = num_attention_heads
        __UpperCAmelCase : List[Any] = intermediate_size
        __UpperCAmelCase : Any = hidden_act
        __UpperCAmelCase : Any = hidden_dropout_prob
        __UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        __UpperCAmelCase : Dict = max_position_embeddings
        __UpperCAmelCase : List[str] = type_vocab_size
        __UpperCAmelCase : str = type_sequence_label_size
        __UpperCAmelCase : List[str] = initializer_range
        __UpperCAmelCase : Tuple = num_choices

    def UpperCAmelCase ( self : Dict ) -> Tuple:
        # Intended: prepare_config_and_inputs -> (config, ids, type_ids, mask).
        __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase : Optional[Any] = None
        if self.use_attention_mask:
            __UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCAmelCase : List[Any] = None
        if self.use_token_type_ids:
            __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCAmelCase : Optional[Any] = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def UpperCAmelCase ( self : Tuple ) -> List[Any]:
        # Intended: prepare_config_and_inputs_for_common -> (config, dict).
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def UpperCAmelCase ( self : Any ) -> List[str]:
        # Intended: decoder variant adding encoder_hidden_states / attention mask.
        __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
        __UpperCAmelCase : int = True
        __UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a ( lowercase__ , unittest.TestCase ):
    """Flax RoBERTa model test-suite entry point.

    NOTE(review): ``lowercase__`` (originally the shared FlaxModelTesterMixin
    base) and ``FlaxRobertaModelTester`` are undefined names in this module
    after renaming, both class attributes below are bound to the same name
    ``a``, and ``__lowercase`` inside the slow test is undefined. Intent is
    documented in the comments only.
    """

    a : Optional[Any] = True
    a : List[Any] = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Intended: setUp — build the model tester helper.
        __UpperCAmelCase : List[str] = FlaxRobertaModelTester(self )

    @slow
    def UpperCAmelCase ( self : str ) -> List[Any]:
        # Intended: smoke-test loading each class from PyTorch "roberta-base"
        # weights and running a forward pass on a 1x1 input.
        for model_class_name in self.all_model_classes:
            __UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=__lowercase )
            __UpperCAmelCase : int = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__lowercase )
| 63 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
a : str = False
class a ( unittest.TestCase ):
    """Fast (dummy-module) tests for VQDiffusionPipeline.

    NOTE(review): renaming broke this class: every property/method is bound
    to the same name ``UpperCAmelCase`` (later definitions shadow earlier
    ones), bodies assign results to ``__UpperCAmelCase`` and then read the
    original names (``model``, ``pipe``, ``image``, ...), and attributes such
    as ``self.num_embed`` / ``self.dummy_vqvae`` /
    ``self.text_embedder_hidden_size`` no longer exist under those names.
    The comments below record the apparent intent only.
    """

    def UpperCAmelCase ( self : Any ) -> Optional[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
        # Apparently num_embed (codebook size of the dummy VQ model).
        return 12

    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
        # Apparently num_embeds_ada_norm for the dummy transformer.
        return 12

    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Any:
        # Apparently text_embedder_hidden_size.
        return 32

    @property
    def UpperCAmelCase ( self : List[str] ) -> Dict:
        # Apparently dummy_vqvae: a tiny seeded VQModel.
        torch.manual_seed(0 )
        __UpperCAmelCase : Union[str, Any] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def UpperCAmelCase ( self : Optional[int] ) -> int:
        # Apparently dummy_tokenizer: tiny CLIP tokenizer from the hub.
        __UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def UpperCAmelCase ( self : Union[str, Any] ) -> int:
        # Apparently dummy_text_encoder: tiny seeded CLIP text model.
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(__lowercase )

    @property
    def UpperCAmelCase ( self : List[str] ) -> List[Any]:
        # Apparently dummy_transformer: tiny seeded Transformer2DModel.
        torch.manual_seed(0 )
        __UpperCAmelCase : Union[str, Any] = 12
        __UpperCAmelCase : Optional[Any] = 12
        __UpperCAmelCase : Any = {
            """attention_bias""": True,
            """cross_attention_dim""": 32,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 32,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        __UpperCAmelCase : Tuple = TransformeraDModel(**__lowercase )
        return model

    def UpperCAmelCase ( self : str ) -> Optional[int]:
        # Intended: end-to-end CPU run with LEARNABLE classifier-free
        # sampling embeddings; pins a 3x3 corner slice of the output.
        __UpperCAmelCase : List[str] = """cpu"""
        __UpperCAmelCase : Optional[Any] = self.dummy_vqvae
        __UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
        __UpperCAmelCase : List[Any] = self.dummy_tokenizer
        __UpperCAmelCase : int = self.dummy_transformer
        __UpperCAmelCase : Any = VQDiffusionScheduler(self.num_embed )
        __UpperCAmelCase : List[str] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        __UpperCAmelCase : Any = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        __UpperCAmelCase : List[Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : int = """teddy bear playing in the pool"""
        __UpperCAmelCase : Tuple = torch.Generator(device=__lowercase ).manual_seed(0 )
        __UpperCAmelCase : List[Any] = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type="""np""" )
        __UpperCAmelCase : str = output.images
        __UpperCAmelCase : Dict = torch.Generator(device=__lowercase ).manual_seed(0 )
        __UpperCAmelCase : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type="""np""" , return_dict=__lowercase , num_inference_steps=2 )[0]
        __UpperCAmelCase : Any = image[0, -3:, -3:, -1]
        __UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __UpperCAmelCase : Optional[int] = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase ( self : str ) -> int:
        # Intended: same pipeline but with NON-learnable (fixed-size)
        # classifier-free sampling embeddings.
        __UpperCAmelCase : List[Any] = """cpu"""
        __UpperCAmelCase : Any = self.dummy_vqvae
        __UpperCAmelCase : Tuple = self.dummy_text_encoder
        __UpperCAmelCase : int = self.dummy_tokenizer
        __UpperCAmelCase : List[Any] = self.dummy_transformer
        __UpperCAmelCase : Tuple = VQDiffusionScheduler(self.num_embed )
        __UpperCAmelCase : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        __UpperCAmelCase : str = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        __UpperCAmelCase : str = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : List[str] = """teddy bear playing in the pool"""
        __UpperCAmelCase : Any = torch.Generator(device=__lowercase ).manual_seed(0 )
        __UpperCAmelCase : Any = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type="""np""" )
        __UpperCAmelCase : Union[str, Any] = output.images
        __UpperCAmelCase : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        __UpperCAmelCase : str = pipe(
            [prompt] , generator=__lowercase , output_type="""np""" , return_dict=__lowercase , num_inference_steps=2 )[0]
        __UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        __UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __UpperCAmelCase : Dict = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained ithq VQ-Diffusion model.

    NOTE(review): same mangling as above — locals are assigned to
    ``__UpperCAmelCase`` but read under their original names
    (``pipeline``, ``output``, ``image``, ``expected_image``), and
    ``__lowercase`` (originally the torch device) is undefined.
    """

    def UpperCAmelCase ( self : str ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        # Intended: compare one classifier-free-sampled image against a
        # stored reference array within a loose tolerance.
        __UpperCAmelCase : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
        __UpperCAmelCase : List[str] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
        __UpperCAmelCase : Tuple = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __UpperCAmelCase : Tuple = torch.Generator(device=__lowercase ).manual_seed(0 )
        __UpperCAmelCase : Optional[int] = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__lowercase , output_type="""np""" , )
        __UpperCAmelCase : Optional[int] = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class a ( lowercase__ ):
    """Names of the supported learning-rate schedule types.

    NOTE(review): the base class ``lowercase__`` is an undefined name in
    this module (originally an ExplicitEnum), and every member below is
    assigned to the same attribute ``a``, so only the final value
    ('piecewise_constant') would survive. The factory-function mapping
    further down also expects this class under the name ``SchedulerType``.
    """

    a : Tuple = 'linear'
    a : int = 'cosine'
    a : Optional[Any] = 'cosine_with_restarts'
    a : Dict = 'polynomial'
    a : Tuple = 'constant'
    a : Dict = 'constant_with_warmup'
    a : Any = 'piecewise_constant'
def lowerCamelCase__ ( optimizer: Optimizer , last_epoch: int = -1 ):
    """Create a schedule with a constant learning rate.

    Fix: the original declared both parameters with the same mangled name
    (a SyntaxError) and shadowed them again inside the lambda; the standard
    names are restored.

    Args:
        optimizer: the wrapped optimizer whose LR is scheduled.
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` applying a multiplier of 1
        at every step.
    """
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    """Constant schedule with a linear warmup.

    Fix: the original declared three parameters with the same mangled name
    (a SyntaxError) while the inner lambda read ``current_step`` and
    ``num_warmup_steps``; the names are restored.

    Args:
        optimizer: the wrapped optimizer whose LR is scheduled.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` for the schedule.
    """

    def lr_lambda(current_step: int ):
        # Ramp linearly during warmup, then hold the base LR.
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    """Piecewise-constant learning-rate multiplier schedule.

    Fix: the original declared three parameters with the same mangled name
    (a SyntaxError) and clobbered every local through repeated
    ``__UpperCAmelCase`` assignments while the code read ``rule_str``,
    ``rules_dict``, ``last_lr_multiple`` etc.; the working names are
    restored.

    Args:
        optimizer: the wrapped optimizer whose LR is scheduled.
        step_rules: e.g. ``"1:10,20:0.1,0.01"`` — the value attached to a
            boundary N applies while the step count is below N; the trailing
            value applies to all later steps.
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` for the schedule.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str , lr_str = rule_str.split(":")
        steps = int(value_str )
        rules_dict[steps] = float(lr_str )
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        # Bind the parsed rules into a step -> multiplier lookup.
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def lowerCamelCase__ ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup followed by linear decay to zero.

    Fix: the original declared four parameters with the same mangled name
    (a SyntaxError) while the inner function read ``current_step``,
    ``num_warmup_steps`` and ``num_training_steps``; the names are restored.

    Args:
        optimizer: the wrapped optimizer whose LR is scheduled.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total steps; the multiplier reaches 0 here.
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` for the schedule.
    """

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Decay linearly from 1 at the end of warmup to 0 at the end of training.
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    """Linear warmup followed by a cosine decay.

    Fix: the original declared five parameters with the same mangled name
    (a SyntaxError) and dropped ``num_cycles`` from the cosine expression;
    the names are restored per the inner function's references.

    Args:
        optimizer: the wrapped optimizer whose LR is scheduled.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total steps of the schedule.
        num_cycles: number of cosine waves (0.5 = decay from 1 to 0).
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` for the schedule.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Fraction of the post-warmup phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    """Linear warmup, then cosine decay with several hard restarts.

    Fix: the original declared five parameters with the same mangled name
    (a SyntaxError) and dropped ``num_cycles`` from the cosine expression;
    the names are restored per the inner function's references.

    Args:
        optimizer: the wrapped optimizer whose LR is scheduled.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total steps of the schedule.
        num_cycles: number of full cosine cycles (hard restarts).
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` for the schedule.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the top of each cycle.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase__ ( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup, then polynomial decay from the optimizer's LR to lr_end.

    Fix: the original declared six parameters with the same mangled name
    (a SyntaxError) and clobbered every local via ``__UpperCAmelCase``;
    names are restored per the body's references. The doubled word in the
    error message ("must be be") is also fixed.

    Args:
        optimizer: the wrapped optimizer; its default ``lr`` is the start.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total steps; the LR reaches ``lr_end`` here.
        lr_end: final learning rate after decay.
        power: exponent of the polynomial decay (1.0 = linear).
        last_epoch: the index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` for the schedule.

    Raises:
        ValueError: if ``lr_end`` is not smaller than the initial LR.
    """
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Mapping from scheduler type to its factory function.
# NOTE(review): every name referenced here is undefined in this module after
# renaming — the enum class is bound to `a` (not `SchedulerType`) and all of
# the factories above are bound to `lowerCamelCase__` (not the `get_*`
# names) — so evaluating this dict raises NameError on import. The
# `get_scheduler` function below additionally expects this mapping under
# the name `TYPE_TO_SCHEDULER_FUNCTION`, not `a`.
a : int = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( __lowerCamelCase : Union[str, SchedulerType] , __lowerCamelCase : Optimizer , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : int = -1 , ):
    # Unified factory: dispatch a scheduler name to the matching get_* helper,
    # validating which keyword arguments the chosen schedule requires.
    # NOTE(review): broken by renaming — the parameter `__lowerCamelCase` is
    # declared eight times (a SyntaxError), and `SchedulerType` /
    # `TYPE_TO_SCHEDULER_FUNCTION` do not exist under those names here (the
    # enum and the mapping above are both bound to `a`). Restore the original
    # names before use.
    __UpperCAmelCase : Union[str, Any] = SchedulerType(__lowerCamelCase )
    __UpperCAmelCase : int = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(__lowerCamelCase , last_epoch=__lowerCamelCase )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(__lowerCamelCase , step_rules=__lowerCamelCase , last_epoch=__lowerCamelCase )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(__lowerCamelCase , num_warmup_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , num_cycles=__lowerCamelCase , last_epoch=__lowerCamelCase , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , power=__lowerCamelCase , last_epoch=__lowerCamelCase , )
    return schedule_func(
        __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
| 63 | 1 |
from math import pi, sqrt
def lowerCamelCase__ ( num : float ):
    """Recursively compute Gamma(num) for positive integers and half-integers.

    Fixes: the body referenced ``num`` while the parameter had a mangled
    name (NameError); the half-integer base case lost the ``pi`` constant
    (``sqrt(pi)`` — note ``pi`` is imported at the top of the file but was
    unused); and the recursion called the no-longer-existing name ``gamma``
    instead of this function itself.

    Args:
        num: a positive integer or half-integer.

    Returns:
        Gamma(num) as a float.

    Raises:
        ValueError: if num <= 0.
        OverflowError: if num > 171.5 (result exceeds the float range).
        NotImplementedError: if num is neither an integer nor a half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 1_7_1.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        # Gamma(1/2) = sqrt(pi) anchors the half-integer recursion.
        return sqrt(pi )
    else:
        # Gamma(n) = (n - 1) * Gamma(n - 1); recurse via this function's name.
        return 1.0 if num == 1 else (num - 1) * lowerCamelCase__(num - 1 )
def lowerCamelCase__ ( ):
    # Self-test of the gamma implementation above.
    # NOTE(review): broken by renaming — `gamma` is undefined (the
    # implementation above is also bound to `lowerCamelCase__`, which this
    # def shadows), and `__lowerCamelCase` (originally `pi`) does not exist
    # in this zero-argument function.
    assert gamma(0.5 ) == sqrt(__lowerCamelCase )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive REPL loop: read values and print their Gamma.
    # NOTE(review): broken by renaming — `Optional`/`List` are never imported
    # (module-level annotations are evaluated, so this raises NameError),
    # the loop tests `num` while the value is bound to `a`, and `gamma` no
    # longer exists under that name in this module.
    a : Optional[int] = 1.0
    while num:
        a : List[str] = float(input("Gamma of: "))
        print(f"""gamma({num}) = {gamma(num)}""")
    print("\nEnter 0 to exit...")
| 63 |
from math import pi, sqrt
def lowerCamelCase__ ( num : float ):
    """Recursively compute Gamma(num) for positive integers and half-integers.

    Fixes: the body referenced ``num`` while the parameter had a mangled
    name (NameError); the half-integer base case lost the ``pi`` constant
    (``sqrt(pi)`` — note ``pi`` is imported at the top of the file but was
    unused); and the recursion called the no-longer-existing name ``gamma``
    instead of this function itself.

    Args:
        num: a positive integer or half-integer.

    Returns:
        Gamma(num) as a float.

    Raises:
        ValueError: if num <= 0.
        OverflowError: if num > 171.5 (result exceeds the float range).
        NotImplementedError: if num is neither an integer nor a half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 1_7_1.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        # Gamma(1/2) = sqrt(pi) anchors the half-integer recursion.
        return sqrt(pi )
    else:
        # Gamma(n) = (n - 1) * Gamma(n - 1); recurse via this function's name.
        return 1.0 if num == 1 else (num - 1) * lowerCamelCase__(num - 1 )
def lowerCamelCase__ ( ):
    # Self-test of the gamma implementation above.
    # NOTE(review): broken by renaming — `gamma` is undefined (the
    # implementation above is also bound to `lowerCamelCase__`, which this
    # def shadows), and `__lowerCamelCase` (originally `pi`) does not exist
    # in this zero-argument function.
    assert gamma(0.5 ) == sqrt(__lowerCamelCase )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive REPL loop: read values and print their Gamma.
    # NOTE(review): broken by renaming — `Optional`/`List` are never imported
    # (module-level annotations are evaluated, so this raises NameError),
    # the loop tests `num` while the value is bound to `a`, and `gamma` no
    # longer exists under that name in this module.
    a : Optional[int] = 1.0
    while num:
        a : List[str] = float(input("Gamma of: "))
        print(f"""gamma({num}) = {gamma(num)}""")
    print("\nEnter 0 to exit...")
| 63 | 1 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
a : Union[str, Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class a :
    """Builds tiny Autoformer configs and inputs for the model tests.

    NOTE(review): identifier mangling left this class non-runnable:
    ``__init__`` declares the parameter ``__lowercase`` many times over (a
    SyntaxError), locals are assigned to ``__UpperCAmelCase`` while the
    following lines read the original names (``config``, ``model``,
    ``encoder`` ...), several methods share the name ``UpperCAmelCase``
    (later defs shadow earlier ones), and helpers such as
    ``self.prepare_autoformer_inputs_dict`` no longer exist under those
    names. The comments below describe the apparent intent only.
    """

    def __init__( self : str , __lowercase : Any , __lowercase : Optional[int]=16 , __lowercase : Union[str, Any]=13 , __lowercase : int=7 , __lowercase : List[Any]=14 , __lowercase : List[Any]=10 , __lowercase : List[str]=19 , __lowercase : List[str]=5 , __lowercase : Optional[int]=4 , __lowercase : int=True , __lowercase : Dict=16 , __lowercase : Any=2 , __lowercase : Union[str, Any]=4 , __lowercase : Optional[Any]=4 , __lowercase : Union[str, Any]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Dict=0.1 , __lowercase : List[str]=[1, 2, 3, 4, 5] , __lowercase : Optional[Any]=25 , __lowercase : List[str]=5 , ) -> Dict:
        # Intended: store parent plus the time-series model hyper-parameters.
        __UpperCAmelCase : Any = d_model
        __UpperCAmelCase : Dict = parent
        __UpperCAmelCase : List[Any] = batch_size
        __UpperCAmelCase : List[str] = prediction_length
        __UpperCAmelCase : Dict = context_length
        __UpperCAmelCase : Any = cardinality
        __UpperCAmelCase : Any = num_time_features
        __UpperCAmelCase : Optional[Any] = lags_sequence
        __UpperCAmelCase : Union[str, Any] = embedding_dimension
        __UpperCAmelCase : List[str] = is_training
        __UpperCAmelCase : Optional[int] = hidden_size
        __UpperCAmelCase : List[Any] = num_hidden_layers
        __UpperCAmelCase : List[str] = num_attention_heads
        __UpperCAmelCase : Optional[Any] = intermediate_size
        __UpperCAmelCase : List[str] = hidden_act
        __UpperCAmelCase : Any = hidden_dropout_prob
        __UpperCAmelCase : List[str] = attention_probs_dropout_prob
        __UpperCAmelCase : Any = context_length
        __UpperCAmelCase : int = prediction_length + label_length
        __UpperCAmelCase : List[str] = label_length
        __UpperCAmelCase : Union[str, Any] = moving_average
        __UpperCAmelCase : Optional[int] = autocorrelation_factor

    def UpperCAmelCase ( self : Tuple ) -> List[Any]:
        # Intended: get_config — build an AutoformerConfig from the stored
        # hyper-parameters.
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def UpperCAmelCase ( self : Optional[int] , __lowercase : str ) -> Optional[Any]:
        # Intended: prepare_autoformer_inputs_dict(config) — random encoder
        # and decoder tensors sized from the config.
        __UpperCAmelCase : Tuple = config.context_length + max(config.lags_sequence )
        __UpperCAmelCase : str = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        __UpperCAmelCase : str = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        __UpperCAmelCase : Dict = floats_tensor([self.batch_size, _past_length] )
        __UpperCAmelCase : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        __UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        __UpperCAmelCase : Any = floats_tensor([self.batch_size, config.prediction_length] )
        __UpperCAmelCase : Union[str, Any] = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict

    def UpperCAmelCase ( self : Dict ) -> List[Any]:
        # Intended: prepare_config_and_inputs.
        __UpperCAmelCase : Any = self.get_config()
        __UpperCAmelCase : int = self.prepare_autoformer_inputs_dict(__lowercase )
        return config, inputs_dict

    def UpperCAmelCase ( self : Any ) -> Optional[int]:
        # Intended: prepare_config_and_inputs_for_common (same pair).
        __UpperCAmelCase , __UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Union[str, Any] ) -> int:
        # Intended: check_encoder_decoder_model_standalone — save/reload the
        # encoder and decoder separately and compare their outputs against
        # the full model's within 1e-3.
        __UpperCAmelCase : int = AutoformerModel(config=__lowercase ).to(__lowercase ).eval()
        __UpperCAmelCase : int = model(**__lowercase )
        __UpperCAmelCase : Optional[Any] = outputs.encoder_last_hidden_state
        __UpperCAmelCase : Tuple = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __UpperCAmelCase : int = model.get_encoder()
            encoder.save_pretrained(__lowercase )
            __UpperCAmelCase : List[str] = AutoformerEncoder.from_pretrained(__lowercase ).to(__lowercase )
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = model.create_network_inputs(**__lowercase )
        __UpperCAmelCase , __UpperCAmelCase : Tuple = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        __UpperCAmelCase : Tuple = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        __UpperCAmelCase : str = encoder(inputs_embeds=__lowercase )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        __UpperCAmelCase : int = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        __UpperCAmelCase : int = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        __UpperCAmelCase : int = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        __UpperCAmelCase : Dict = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __UpperCAmelCase : Optional[Any] = model.get_decoder()
            decoder.save_pretrained(__lowercase )
            __UpperCAmelCase : List[Any] = AutoformerDecoder.from_pretrained(__lowercase ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = decoder(
            trend=__lowercase , inputs_embeds=__lowercase , encoder_hidden_states=__lowercase , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a : Any = (AutoformerForPrediction,) if is_torch_available() else ()
a : Dict = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
a : Optional[int] = False
a : List[str] = False
a : str = False
a : Optional[int] = False
a : Union[str, Any] = False
a : Tuple = False
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase : Tuple = AutoformerModelTester(self )
__UpperCAmelCase : Dict = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCAmelCase ( self : Any ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> str:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model_class.from_pretrained(__lowercase , output_loading_info=__lowercase )
self.assertEqual(info["""missing_keys"""] , [] )
def UpperCAmelCase ( self : Optional[Any] ) -> int:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__lowercase )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def UpperCAmelCase ( self : Any ) -> List[Any]:
pass
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : Any = inspect.signature(getattr(__lowercase , """forward""" ) )
# The main input is the name of the argument after `self`
__UpperCAmelCase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __lowercase )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(__lowercase )
__UpperCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Tuple = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__lowercase )] , __lowercase )
    def UpperCAmelCase ( self : int ) -> Optional[int]:
        # Attention-outputs test: checks encoder/decoder/cross attention
        # shapes and the total number of model outputs.
        # NOTE(review): the obfuscation rewrote every assignment target to
        # __UpperCAmelCase, so names read below (config, inputs_dict,
        # attentions, d_model, num_attention_heads, dim, out_len, ...) are
        # never bound, and the mangled __lowercase defaults/arguments are
        # undefined.  Code is kept byte-identical; only comments are added.
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : Any = True  # presumably config.return_dict — TODO confirm
        __UpperCAmelCase : List[str] = getattr(self.model_tester , """seq_length""" , __lowercase )
        __UpperCAmelCase : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , __lowercase )
        __UpperCAmelCase : Dict = getattr(self.model_tester , """encoder_seq_length""" , __lowercase )
        __UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , """d_model""" , __lowercase )
        __UpperCAmelCase : List[Any] = getattr(self.model_tester , """num_attention_heads""" , __lowercase )
        __UpperCAmelCase : List[Any] = d_model // num_attention_heads  # per-head dimension
        for model_class in self.all_model_classes:
            # presumably inputs_dict["output_attentions"] = True and
            # inputs_dict["output_hidden_states"] = False — TODO confirm
            __UpperCAmelCase : Dict = True
            __UpperCAmelCase : str = False
            __UpperCAmelCase : Optional[Any] = True
            __UpperCAmelCase : Union[str, Any] = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            # First pass: attentions requested via the inputs.
            with torch.no_grad():
                __UpperCAmelCase : int = model(**self._prepare_for_class(__lowercase , __lowercase ) )
            __UpperCAmelCase : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __UpperCAmelCase : int = True
            __UpperCAmelCase : int = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : List[str] = model(**self._prepare_for_class(__lowercase , __lowercase ) )
            __UpperCAmelCase : Optional[int] = outputs.encoder_attentions
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            # Expected number of model outputs: 7 base entries plus extras.
            __UpperCAmelCase : List[str] = len(__lowercase )
            __UpperCAmelCase : Union[str, Any] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1  # Autoformer additionally returns a trend tensor
            if "past_key_values" in outputs:
                correct_outlen += 1 # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(__lowercase , __lowercase )
            # decoder attentions
            __UpperCAmelCase : List[Any] = outputs.decoder_attentions
            self.assertIsInstance(__lowercase , (list, tuple) )
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            __UpperCAmelCase : Tuple = outputs.cross_attentions
            self.assertIsInstance(__lowercase , (list, tuple) )
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            __UpperCAmelCase : List[Any] = True
            __UpperCAmelCase : int = True
            __UpperCAmelCase : List[str] = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : int = model(**self._prepare_for_class(__lowercase , __lowercase ) )
            # Two more outputs expected once hidden states are also returned.
            self.assertEqual(out_len + 2 , len(__lowercase ) )
            __UpperCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        # Marked flaky upstream: gradient retention on attentions/hidden
        # states is occasionally nondeterministic, so failures are retried.
        super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( __lowerCamelCase : Tuple="train-batch.pt" ):
    """Download a cached Autoformer test batch from the Hub and load it with torch."""
    # BUG FIX: the original bound the downloaded path to a mangled
    # placeholder, returned the unbound name `batch`, and passed the
    # *filename argument* as torch.load's map_location.  Load onto CPU
    # instead (the tests move tensors to the target device themselves).
    # NOTE(review): the original map_location was lost in obfuscation —
    # confirm CPU is acceptable for all callers.
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=__lowerCamelCase , repo_type="""dataset""" )
    batch = torch.load(file , map_location="""cpu""" )
    return batch
@require_torch
@slow
class a ( unittest.TestCase ):
    """Slow integration tests running the pretrained tourism-monthly Autoformer checkpoint."""

    # NOTE(review): assignment targets were mangled to __UpperCAmelCase and
    # call arguments to __lowercase, so names such as `model`, `batch`,
    # `output`, `outputs` are read without ever being bound, and the target
    # device / atol values are lost.  Code kept byte-identical.
    def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        # Forward pass through the base model on the train batch; checks the
        # output shape and a 3x3 slice against recorded reference values.
        __UpperCAmelCase : Dict = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__lowercase )
        __UpperCAmelCase : Dict = prepare_batch()
        with torch.no_grad():
            __UpperCAmelCase : int = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        __UpperCAmelCase : Dict = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , __lowercase )
        __UpperCAmelCase : Optional[int] = torch.tensor(
            [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=__lowercase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Encoder-only pass of the prediction model on the validation batch;
        # checks the encoder_last_hidden_state shape and a 3x3 slice.
        __UpperCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__lowercase )
        __UpperCAmelCase : List[str] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            __UpperCAmelCase : Any = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        __UpperCAmelCase : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , __lowercase )
        __UpperCAmelCase : Dict = torch.tensor(
            [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=__lowercase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # Full generate() pass; checks the sampled sequence shape and the
        # per-series mean of the generated samples.
        __UpperCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            __UpperCAmelCase : int = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        __UpperCAmelCase : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , __lowercase )
        __UpperCAmelCase : Optional[Any] = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=__lowercase )
        __UpperCAmelCase : List[str] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __lowercase , rtol=1e-1 ) )
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a :
    """A binary-tree node holding an integer payload and two child links."""

    # BUG FIX: the original declared all three fields under the single
    # mangled name `a`, so the later annotations clobbered the earlier ones
    # and the dataclass ended up with a single field.  Every traversal
    # function in this file reads `.data`, `.left` and `.right`; restore
    # those field names.
    data : int
    left : Node | None = None
    right : Node | None = None
def lowerCamelCase__ ( ):
    """Build the fixed five-node sample tree used by the demo driver."""
    # BUG FIX: the original bound every node to the mangled placeholder
    # __UpperCAmelCase and then returned the unbound name `tree`.
    # NOTE(review): `Node` is presumably the dataclass defined above (its
    # class name was mangled to `a`) — confirm the intended alias.  The
    # shape below (2/3 under the root, 4/5 under 2) is the classic layout
    # of this kata — TODO confirm against the pre-obfuscation source.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
    """Return the pre-order (root, left, right) traversal as a flat list."""
    # BUG FIX: the original body recursed through the name `preorder`, which
    # is not the (mangled) name of this function, so any non-trivial call
    # raised NameError.  A local helper restores the recursion without
    # changing the function's external name.
    def preorder(node ):
        return [node.data, *preorder(node.left ), *preorder(node.right )] if node else []
    return preorder(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
    """Return the post-order (left, right, root) traversal as a flat list."""
    # BUG FIX: the recursion referenced `postorder`, which is not the
    # (mangled) name of this function — NameError on any non-empty tree.
    # A local helper restores the recursion without changing the interface.
    def postorder(node ):
        return postorder(node.left ) + postorder(node.right ) + [node.data] if node else []
    return postorder(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
    """Return the in-order (left, root, right) traversal as a flat list."""
    # BUG FIX: the recursion referenced `inorder`, which is not the
    # (mangled) name of this function — NameError on any non-empty tree.
    # A local helper restores the recursion without changing the interface.
    def inorder(node ):
        return [*inorder(node.left ), node.data, *inorder(node.right )] if node else []
    return inorder(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
    """Return the height of the tree (node count on the longest root-to-leaf path; 0 for empty)."""
    # BUG FIX: the recursion referenced `height`, which is not the (mangled)
    # name of this function — NameError on any non-empty tree.  A local
    # helper restores the recursion without changing the interface.
    def height(node ):
        return (max(height(node.left ) , height(node.right ) ) + 1) if node else 0
    return height(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
    """Return the breadth-first (level-order) traversal as a flat list."""
    # BUG FIX: the output list, the BFS queue and the current node were all
    # bound to the mangled placeholder __UpperCAmelCase while later lines
    # read the intended names (output, process_queue, node); restore them.
    output : list[Any] = []
    if __lowerCamelCase is None:
        return output
    process_queue = deque([__lowerCamelCase] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def lowerCamelCase__ ( root : Node | None , level : int ):
    """Return the node values on 1-indexed `level`, scanned left to right."""
    # BUG FIX: both parameters — here and in the inner helper — were
    # declared under the duplicate name __lowerCamelCase, which is a
    # SyntaxError in Python, and the output list was bound to a mangled
    # placeholder.  Distinct names restore the intended logic.
    output : list[Any] = []

    def populate_output(node : Node | None , depth : int ) -> None:
        if not node:
            return
        if depth == 1:
            output.append(node.data )
        elif depth > 1:
            populate_output(node.left , depth - 1 )
            populate_output(node.right , depth - 1 )

    populate_output(root , level )
    return output
def lowerCamelCase__ ( root : Node | None , level : int ):
    """Return the node values on 1-indexed `level`, scanned right to left."""
    # BUG FIX: both parameters — here and in the inner helper — were
    # declared under the duplicate name __lowerCamelCase, which is a
    # SyntaxError in Python, and the output list was bound to a mangled
    # placeholder.  Distinct names restore the intended logic.
    output : list[Any] = []

    def populate_output(node : Node | None , depth : int ) -> None:
        if node is None:
            return
        if depth == 1:
            output.append(node.data )
        elif depth > 1:
            # Visit the right subtree first to reverse the scan direction.
            populate_output(node.right , depth - 1 )
            populate_output(node.left , depth - 1 )

    populate_output(root , level )
    return output
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
    """Return the zig-zag level-order traversal, one list per level, alternating direction."""
    # BUG FIX: `output`, `flag` and `height_tree` were bound to mangled
    # placeholders while later lines read the real names, and the per-level
    # helpers were called with the root instead of the level index.
    # NOTE(review): `height`, `get_nodes_from_left_to_right` and
    # `get_nodes_from_right_to_left` are the original helper names; this
    # file's obfuscation renamed those sibling definitions — confirm.
    if __lowerCamelCase is None:
        return []
    output : list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(__lowerCamelCase )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(__lowerCamelCase , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(__lowerCamelCase , h ) )
            flag = 0
    return output
def lowerCamelCase__ ( ): # Main function for testing.
    """Demo driver: build the sample tree and print every traversal."""
    # BUG FIX: the tree was bound to a mangled placeholder and every later
    # call passed the undefined name __lowerCamelCase; bind and reuse `root`.
    # NOTE(review): make_tree / inorder / preorder / postorder / height /
    # level_order / get_nodes_from_left_to_right / zigzag are the original
    # helper names; the obfuscation renamed those sibling definitions.
    root = make_tree()
    print(f"""In-order Traversal: {inorder(root )}""" )
    print(f"""Pre-order Traversal: {preorder(root )}""" )
    print(f"""Post-order Traversal: {postorder(root )}""" , """\n""" )
    print(f"""Height of Tree: {height(root )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(root ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(root ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(root ) )
# Run the doctests, then the demo driver, when executed as a script.
# NOTE(review): `main` is the original (pre-obfuscation) name of the driver
# above, which this file now defines as lowerCamelCase__ — confirm.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 63 | 1 |
# Notebook-generation settings for the docs builder: the installation cell
# injected at the top of generated notebooks, the default notebook prelude,
# and placeholder-to-class substitutions used when rendering code samples.
# NOTE(review): all three constants were renamed to `a` by the obfuscation;
# the second one reads INSTALL_CONTENT, the original name of the first.
a : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a : Optional[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
a : Optional[int] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """Tokenizer test-suite for GPTSAN-japanese: vocab/emoji handling, prefix-text encoding and batch APIs."""

    # NOTE(review): the base-class name was mangled to `lowercase__`
    # (presumably TokenizerTesterMixin, imported above), assignment targets
    # to __UpperCAmelCase and arguments to __lowercase, so many names below
    # (vocab_tokens, tokenizer, tokens, ids, prefix_text, input_text, ...)
    # are read without ever being bound.  Code kept byte-identical.
    a : Optional[int] = GPTSanJapaneseTokenizer
    a : Optional[Any] = False
    a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}

    def UpperCAmelCase ( self : Tuple ) -> Any:
        # Write a tiny vocab file and an emoji table into the mixin's tmp
        # dir so the tokenizer under test can be instantiated offline.
        super().setUp()
        # fmt: off
        __UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        __UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
        __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(__lowercase ) )

    def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
        # Factory used by the mixin: build a tokenizer from the tmp files.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )

    def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
        # Input/output text pair: 㔺 normalises to 世 on decode.
        __UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        __UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
        # Encode/decode round-trip helper used by the mixin.
        __UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        return text, ids

    def UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass  # TODO add if relevant

    def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass  # TODO add if relevant

    def UpperCAmelCase ( self : Dict ) -> Tuple:
        pass  # TODO add if relevant

    def UpperCAmelCase ( self : str ) -> Tuple:
        # Tokenization plus id conversion with and without special tokens.
        __UpperCAmelCase : List[str] = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
        __UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids without special tokens
        __UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids with special tokens
        __UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
        __UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    def UpperCAmelCase ( self : Tuple ) -> Dict:
        # <|bagoftoken|> expands to a bag of repeated tokens on decode.
        __UpperCAmelCase : int = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        __UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
        __UpperCAmelCase : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : int ) -> Optional[int]:
        # prefix_text must be equivalent whether concatenated or passed
        # through the dedicated keyword argument.
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __UpperCAmelCase : List[Any] = """こんにちは、世界。"""
        __UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
        __UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
        __UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
        __UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        __UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
        __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Any = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : Any ) -> str:
        # token_type_ids must mark the prefix segment consistently across
        # the three equivalent ways of supplying prefix text.
        __UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __UpperCAmelCase : int = """こんにちは、世界。"""
        __UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
        __UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
        __UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
        __UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
        __UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
        __UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        __UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
        __UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        __UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : List[str] ) -> int:
        # The SEG token position moves with the prefix split, but decoding
        # must produce identical text.
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
        __UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        __UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
    @slow
    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # Batch encoding of (prefix, text) pairs: ids, token_type_ids and
        # attention_mask must match between __call__ and batch_encode_plus.
        __UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        __UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
        # fmt: off
        __UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        __UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        __UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , __lowercase )
        self.assertListEqual(x_token.token_type_ids , __lowercase )
        self.assertListEqual(x_token.attention_mask , __lowercase )
        self.assertListEqual(x_token_a.input_ids , __lowercase )
        self.assertListEqual(x_token_a.token_type_ids , __lowercase )
        self.assertListEqual(x_token_a.attention_mask , __lowercase )

    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def UpperCAmelCase ( self : Any ) -> int:
        # tokenizer has no padding token
        pass
| 63 | 1 |
# Uppercase alphabet used as the Vigenère cipher's symbol set.
# NOTE(review): originally named LETTERS — the translate function below
# still reads it under that name; the obfuscation renamed it to `a`.
a : Optional[Any] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase__ ( ):
    """Interactive driver: read message, key and mode, then print the translation."""
    # BUG FIX: message / key / mode / the translated text were all bound to
    # the mangled placeholder __UpperCAmelCase while later lines read the
    # real names; restore the bindings.
    # NOTE(review): encrypt_message / decrypt_message are the original
    # helper names; this file's obfuscation renamed those definitions.
    message = input("""Enter message: """ )
    key = input("""Enter key [alphanumeric]: """ )
    mode = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        mode = """encrypt"""
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        mode = """decrypt"""
        translated = decrypt_message(key , message )
    print(f"""\n{mode.title()}ed message:""" )
    print(translated )
def lowerCamelCase__ ( key : str , message : str ):
    """Encrypt `message` with the Vigenère cipher under `key`."""
    # BUG FIX: both parameters were declared under the duplicate name
    # __lowerCamelCase — a SyntaxError in Python; give them distinct names.
    # NOTE(review): translate_message is the original helper name below,
    # which the obfuscation renamed to lowerCamelCase__.
    return translate_message(key , message , """encrypt""" )
def lowerCamelCase__ ( key : str , message : str ):
    """Decrypt `message` with the Vigenère cipher under `key`."""
    # BUG FIX: both parameters were declared under the duplicate name
    # __lowerCamelCase — a SyntaxError in Python; give them distinct names.
    # NOTE(review): translate_message is the original helper name below,
    # which the obfuscation renamed to lowerCamelCase__.
    return translate_message(key , message , """decrypt""" )
def lowerCamelCase__ ( key : str , message : str , mode : str ):
    """Vigenère-translate `message` under `key`; `mode` is "encrypt" or "decrypt".

    Non-letter symbols pass through unchanged and do not advance the key.
    """
    # BUG FIX (obfuscation damage):
    #  * all three parameters shared the duplicate name __lowerCamelCase,
    #    which is a SyntaxError in Python;
    #  * translated / key_index / num were bound to mangled placeholders;
    #  * both wrap-arounds took len() of the wrong object — the alphabet
    #    length and the key length are what the algorithm needs.
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
# Launch the interactive driver when run as a script.
# NOTE(review): `main` is the original name of the driver above, which the
# obfuscation renamed to lowerCamelCase__ — confirm.
if __name__ == "__main__":
    main()
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
    """PyTorch-specific benchmark arguments: device selection, TPU detection and fp16 handling."""

    # NOTE(review): the base class was mangled to `lowercase__` (presumably
    # BenchmarkArguments, imported above); the dataclass fields below were
    # all renamed to the single name `a`, so later annotations clobber the
    # earlier ones; and __init__ reads `kwargs` / `positive_arg` which were
    # never bound (the parameter/assignment names were mangled).  Code kept
    # byte-identical; only comments are added.
    # Legacy `no_*` boolean flags still accepted for backward compatibility.
    a : Dict = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
        # Translate each deprecated `no_*` kwarg into its positive
        # counterpart (inverting the value), then defer to the base init.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the `no_` prefix to recover the positive flag name.
                __UpperCAmelCase : List[Any] = deprecated_arg[3:]
                setattr(self , __lowercase , not kwargs.pop(__lowercase ) )
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        __UpperCAmelCase : str = kwargs.pop("""torchscript""" , self.torchscript )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        __UpperCAmelCase : Optional[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
        super().__init__(**__lowercase )

    a : bool = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
    a : bool = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    a : str = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )

    @cached_property
    def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
        # Resolve the torch device and GPU count once; cached thereafter.
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            __UpperCAmelCase : str = torch.device("""cpu""" )
            __UpperCAmelCase : int = 0
        elif is_torch_tpu_available():
            __UpperCAmelCase : Tuple = xm.xla_device()
            __UpperCAmelCase : int = 0
        else:
            __UpperCAmelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            __UpperCAmelCase : Optional[int] = torch.cuda.device_count()
        return device, n_gpu

    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> str:
        # True when a TPU runtime is importable and TPU use is enabled.
        return is_torch_tpu_available() and self.tpu

    @property
    def UpperCAmelCase ( self : List[str] ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def UpperCAmelCase ( self : int ) -> "torch.device":
        # Device component of the cached (device, n_gpu) pair.
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]

    @property
    def UpperCAmelCase ( self : int ) -> List[Any]:
        # GPU-count component of the cached (device, n_gpu) pair.
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]

    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Whether at least one GPU is usable for the benchmark.
        return self.n_gpu > 0
| 63 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
a : str = logging.get_logger(__name__)
# NOTE(review): every constant below was renamed to `a` by the obfuscation,
# so the _LazyAutoMapping calls at the bottom read the original
# FLAX_MODEL_*_MAPPING_NAMES names, which this file no longer binds.
# The tables map config-type keys to Flax model class names.
a : Union[str, Any] = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)
a : Union[str, Any] = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)
a : int = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)
a : List[str] = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)
a : int = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)
# Vision-to-text (image captioning / OCR style) mapping.
a : Optional[int] = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)
a : Any = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)
a : Union[str, Any] = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)
a : Optional[int] = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)
a : Union[str, Any] = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)
a : str = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)
# Next-sentence-prediction mapping.
a : Dict = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)
# Speech sequence-to-sequence mapping.
a : Union[str, Any] = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)
# Audio-classification mapping.
a : Optional[Any] = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)
# Lazy config-class -> model-class mappings built from the tables above.
# NOTE(review): the FLAX_MODEL_*_MAPPING_NAMES arguments are the original
# names of those tables, which this file now binds as `a`.
a : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
a : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
a : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
a : str = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
a : Optional[int] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
a : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
a : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
a : Optional[Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
a : Union[str, Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
a : str = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
a : List[Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
a : Any = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
a : str = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
a : Optional[int] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto classes: each resolves a config to the matching Flax model class via
# `_model_mapping`, then is wrapped by `auto_class_update` to attach docs.
# The obfuscated version named every class `a` and called `auto_class_update`
# on undefined identifiers; restore the names actually used by the calls.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 63 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the fixture SQL table.

    The obfuscated version renamed both parameters to the same identifier,
    which made `isinstance(x, x)` and left `dataset`/`expected_features`
    (used by the body and by callers below) undefined.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading with keep_in_memory=True must grow Arrow memory; False must not."""
    # NOTE(review): fixture names restored from body usage; the last fixture is
    # assumed to be the conftest helper silencing SQLAlchemy warnings — confirm.
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """An explicit `features` schema must be honoured; None falls back to inference."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the sqlite DB at `sqlite_path`.

    Fixes the mangled `sqlitea` module reference (the stdlib module is
    `sqlite3`); imported locally so the broken top-of-file import is bypassed.
    """
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip (single process): write to a fresh sqlite DB, compare row-by-row."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip with num_proc=2: multiprocessing must produce identical rows."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise ValueError.

    The obfuscated version passed a parameter to `pytest.raises`; the
    exception type the writer raises for an invalid worker count is ValueError.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 | 1 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime
    factors are 2, 3 and 5): 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    Classic three-pointer merge: i2/i3/i5 index the smallest ugly number
    not yet multiplied by 2/3/5. The obfuscated version collapsed all
    three indices into one variable, which broke the merge.

    For n <= 1 the first ugly number (1) is returned.
    """
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # advance every pointer that produced `next_num` (avoids duplicates)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 63 |
from __future__ import annotations

# Sieve of Eratosthenes up to 1_000_000. The obfuscated version bound the
# sieve list, the loop index, and the inner marking all to `a`, destroying it.
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Primality lookup in the precomputed sieve (valid for 0 <= n <= 1_000_000).

    NOTE(review): like the upstream original, indices 0 and 1 report True —
    harmless here because callers only pass candidates >= 2.
    """
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """True if the decimal representation of n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes below `limit` (every digit rotation is prime).

    Apart from 2 itself, any prime containing an even digit has a non-prime
    rotation, so such candidates are skipped up front.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Project Euler 35: count of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 63 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
a : Tuple = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    """Benchmark arguments for TensorFlow backends (TPU / single GPU / CPU).

    Restores the names the bodies actually reference: the obfuscated version
    named every method `UpperCAmelCase` while still reading
    `self._setup_tpu`, `self._setup_strategy`, `self.is_tpu`, `self.gpu_list`
    and `self.n_gpu`, and it logged `kwargs[positive_arg]` before assigning it.
    """

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` flags into their positive counterparts,
        then pop the TF-specific arguments before delegating to the base class.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                # set the positive flag first so the warning can report it
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        # Resolve the TPU cluster once; None when no TPU is requested/reachable.
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        # Build the distribution strategy matching the resolved hardware.
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
# (ParlAI substring, HF substring) rewrite pairs, applied in order.
# The obfuscated version bound this to `a`, leaving PATTERNS undefined.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI Blenderbot state-dict key to its HF equivalent.

    The obfuscated version discarded every `replace` result; each
    substitution must feed the next one.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    """In place, rename `layernorm_embedding` keys to `layer_norm` in state dict `sd`.

    Used for Blenderbot-3B (`normalize_before=True`) checkpoints.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : str = torch.load(__lowerCamelCase , map_location="""cpu""" )
__UpperCAmelCase : Tuple = model["""model"""]
__UpperCAmelCase : int = BlenderbotConfig.from_json_file(__lowerCamelCase )
__UpperCAmelCase : List[str] = BlenderbotForConditionalGeneration(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = m.model.state_dict().keys()
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase : int = rename_state_dict_key(__lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCamelCase )
m.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
m.half()
m.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
# (ParlAI substring, HF substring) rewrite pairs, applied in order.
# The obfuscated version bound this to `a`, leaving PATTERNS undefined.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI Blenderbot state-dict key to its HF equivalent.

    The obfuscated version discarded every `replace` result; each
    substitution must feed the next one.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    """In place, rename `layernorm_embedding` keys to `layer_norm` in state dict `sd`.

    Used for Blenderbot-3B (`normalize_before=True`) checkpoints.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : str = torch.load(__lowerCamelCase , map_location="""cpu""" )
__UpperCAmelCase : Tuple = model["""model"""]
__UpperCAmelCase : int = BlenderbotConfig.from_json_file(__lowerCamelCase )
__UpperCAmelCase : List[str] = BlenderbotForConditionalGeneration(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = m.model.state_dict().keys()
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase : int = rename_state_dict_key(__lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCamelCase )
m.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
m.half()
m.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection` by interpolation.

    Returns the index of a match, or None when absent. Name restored to
    match the call site in the __main__ block; the obfuscated version bound
    `left`/`right`/`point` updates to throwaway names, breaking the loop.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search of `item` in `sorted_collection[left:right+1]`.

    Returns a matching index or None. The recursive calls at the bottom
    referenced this name, which the obfuscation had removed.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError unless `collection` is in ascending order; else return True.

    Name restored to match the call in the __main__ block below.
    """
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
a : Optional[Any] = 0
if debug == 1:
a : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
a : Tuple = 67
a : List[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 63 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Return a sha256 hex digest of the given source lines, ignoring comments.

    Fixes the mangled `shaaaa` reference (stdlib `hashlib.sha256`), imported
    locally so the broken top-of-file import is bypassed, and keeps the
    comment-stripped result instead of discarding it.
    """
    from hashlib import sha256

    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
# Names restored from the `.update(...)` / `.setdefault(...)` call sites below,
# which the obfuscated `a = ...` rebindings had left undefined.
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 63 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# The UnCLIP pipelines require both PyTorch and transformers >= 4.25.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise an informative error on use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations, only importable when the optional deps are present.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
def _print_dist(dist, v):
    """Pretty-print the v x v distance matrix; unreachable pairs show as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths on a v-vertex adjacency matrix `graph`.

    `graph[i][j]` is the edge weight i -> j, float("inf") when absent.
    Prints the resulting matrix and returns (dist, v). Names restored:
    the obfuscated version never wrote into `dist` nor defined `_print_dist`.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
a : Any = int(input("Enter number of vertices: "))
a : Optional[int] = int(input("Enter number of edges: "))
a : List[Any] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
a : Any = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
a : str = int(input("Enter source:"))
a : Optional[int] = int(input("Enter destination:"))
a : Optional[Any] = float(input("Enter weight:"))
a : Union[str, Any] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.

    See https://arxiv.org/pdf/2202.09778.pdf, formulas (9), (12), (13) and
    Algorithm 2. Restores the attribute writes the obfuscated version
    discarded, and fixes the mangled `torch.atana`/`torch.floataa` calls
    (`torch.atan2` / `torch.float32`).
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps for the diffusion chain (run before inference)."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        self.timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = self.timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Propagate `sample` one timestep backwards with the linear multistep method."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Adams-Bashforth coefficients, up to fourth order once enough history exists.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """Identity: IPNDM does not rescale the model input."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        # x_{t-1} = alpha_{t-1} * (x_t - sigma_t * e_t) / alpha_t + sigma_{t-1} * e_t
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # clamp alpha to avoid division by zero at the final step
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 | 1 |
def lowerCamelCase__():
    """Project Euler 19: count the Sundays that fell on the first of the
    month during the twentieth century (1 Jan 1901 - 31 Dec 2000).

    Bug fix: every assignment bound ``__UpperCAmelCase`` while the loop read
    ``days_per_month`` / ``day`` / ``month`` / ``year`` / ``sundays``; the
    real names are restored.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7  # jump one week to the next Sunday
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name ``solution``.
    print(lowerCamelCase__())
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__():
    """Entry point of the ``transformers-cli`` tool: build the argument
    parser, register all sub-commands, dispatch to the selected one.

    Bug fix: assignments bound ``__UpperCAmelCase`` while later lines read
    ``parser`` and ``service``, and every ``register_subcommand`` call passed
    the undefined name ``__lowerCamelCase``; the real names are restored.
    """
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func""" ):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name ``main``.
    lowerCamelCase__()
| 63 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
# Bug fix: the assignment bound the throwaway name ``a`` while the next line
# reads ``git_repo_path``; restore the real name so the sys.path hack works.
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)

# Bug fix: these module constants all bound the throwaway name ``a`` while
# later code reads ``models`` (see run_and_check) and ``stages`` (the
# parameterization product below); the real names are restored.
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def lowerCamelCase__(func, param_num, param):
    """Generate a sub-test name that contains every parameter.

    ``parameterized`` shows only the first param in the sub-test name by
    default; join them all so both the stage and the model appear.

    Bug fix: the original declared ``__lowerCamelCase`` three times (a
    SyntaxError) and stringified the wrong name inside the generator
    expression (``str(__lowerCamelCase)`` instead of ``str(x)``).
    """
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
    return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test.
# Bug fix: this bound the throwaway name ``a``; the ``@parameterized.expand``
# decorators below consume this list, conventionally named ``params``.
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a ( lowercase__ ):
    """End-to-end DeepSpeed tests for the wav2vec2 ``run_asr.py`` example.

    NOTE(review): identifiers in this chunk look machine-garbled (class ``a``,
    base ``lowercase__``, duplicated ``__lowercase`` parameters, assignment
    targets named ``__UpperCAmelCase`` while later lines read the real
    names).  The comments below describe the apparent intent only; the code
    itself is left untouched and will not run as-is.
    """

    # Each parameterized test fans out over (stage, model) pairs and drives a
    # full training subprocess via the run-and-check helper below.
    @parameterized.expand(__lowercase , name_func=__lowercase )
    def UpperCAmelCase ( self : Optional[int] , __lowercase : Any , __lowercase : str ) -> Optional[int]:
        self.run_and_check(
            stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )

    @require_torch_multi_gpu
    @parameterized.expand(__lowercase , name_func=__lowercase )
    def UpperCAmelCase ( self : List[Any] , __lowercase : Any , __lowercase : Tuple ) -> Optional[int]:
        self.run_and_check(
            stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )

    @parameterized.expand(__lowercase , name_func=__lowercase )
    def UpperCAmelCase ( self : Union[str, Any] , __lowercase : str , __lowercase : Union[str, Any] ) -> Optional[Any]:
        self.run_and_check(
            stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )

    @require_torch_multi_gpu
    @parameterized.expand(__lowercase , name_func=__lowercase )
    def UpperCAmelCase ( self : Optional[Any] , __lowercase : List[Any] , __lowercase : Union[str, Any] ) -> Dict:
        self.run_and_check(
            stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )

    def UpperCAmelCase ( self : List[str] , __lowercase : Optional[Any] ) -> Any:
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    # Presumably ``run_and_check``: resolve the model name, launch the
    # trainer, and validate its output -- TODO confirm against upstream.
    def UpperCAmelCase ( self : Any , __lowercase : str , __lowercase : str , __lowercase : int = 10 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : bool = True , ) -> str:
        __UpperCAmelCase : Optional[Any] = models[model]
        __UpperCAmelCase : int = self.run_trainer(
            stage=__lowercase , model_name=__lowercase , eval_steps=__lowercase , num_train_epochs=1 , distributed=__lowercase , fpaa=__lowercase , )
        self.do_checks(__lowercase )
        return output_dir

    # Presumably ``run_trainer``: assemble the run_asr.py CLI, prepend the
    # deepspeed launcher, and execute the command as a subprocess.
    def UpperCAmelCase ( self : List[str] , __lowercase : str , __lowercase : str , __lowercase : int = 10 , __lowercase : int = 1 , __lowercase : bool = True , __lowercase : bool = True , ) -> Dict:
        __UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir("""./xxx""" , after=__lowercase )
        # The argument string is split on whitespace below, so the layout of
        # the f-string body does not affect the resulting argv list.
        __UpperCAmelCase : List[str] = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
        if fpaa:
            args.extend(["""--fp16"""] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __UpperCAmelCase : Tuple = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        __UpperCAmelCase : Union[str, Any] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        __UpperCAmelCase : List[Any] = self.get_launcher(__lowercase )
        __UpperCAmelCase : Tuple = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__lowercase , env=self.get_env() )
        return output_dir

    # Presumably ``get_launcher``: build the ``deepspeed`` launch command.
    def UpperCAmelCase ( self : Tuple , __lowercase : List[Any]=False ) -> Tuple:
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        __UpperCAmelCase : Optional[int] = min(2 , get_gpu_count() ) if distributed else 1
        return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import wiring for the XGLM model family.
# Bug fix: every bucket assignment and the final sys.modules patch bound the
# throwaway name ``a``; the conventional ``_import_structure`` dict (read by
# ``_LazyModule`` at the bottom) is restored.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule that resolves names on first access.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Bug fix: both module constants bound the throwaway name ``a``; restore the
# conventional names (the example docstring is consumed by
# ``@replace_example_docstring`` on the pipeline's ``__call__``).
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"A red cartoon frog, 4k\"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to(\"cuda\")\n\n        >>> init_image = load_image(\n        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n        ...     \"/kandinsky/frog.png\"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save(\"red_frog.png\")\n        ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    expressed in units of *scale_factor* (latent-space size * scale factor).

    Bug fix: assignments bound ``__UpperCAmelCase`` while later lines read
    ``new_height`` / ``new_width``. The function is renamed to
    ``downscale_height_and_width`` -- the name the pipeline's ``__call__``
    in this file already uses -- instead of the duplicated, shadowed
    ``lowerCamelCase__``.

    Args:
        height: requested output height in pixels.
        width: requested output width in pixels.
        scale_factor: spatial down-scaling factor of the autoencoder.

    Returns:
        Tuple ``(new_height * scale_factor, new_width * scale_factor)``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up when not evenly divisible
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a normalized CHW float tensor
    with a leading batch dimension, valued in [-1, 1].

    Bug fix: assignments bound ``__UpperCAmelCase``/garbled dtypes
    (``np.floataa``, ``1_2_7.5``); real names and ``np.float32``/``127.5``
    are restored. Renamed to ``prepare_image`` -- the name the pipeline's
    ``__call__`` in this file already uses.

    Args:
        pil_image: source PIL image.
        w: target width in pixels.
        h: target height in pixels.

    Returns:
        ``torch.FloatTensor`` of shape (1, 3, h, w).
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("""RGB""" ))
    # Map uint8 [0, 255] to float [-1, 1].
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class a ( lowercase__ ):
    """Kandinsky-2.2-style image-to-image diffusion pipeline built from a
    conditional UNet, a DDPM scheduler and a MoVQ VQ autoencoder.

    NOTE(review): this chunk is machine-garbled -- assignment targets are all
    ``__UpperCAmelCase`` while later lines read the real names (``latents``,
    ``image_embeds``, ...), parameters are duplicated ``__lowercase`` names,
    and module helpers it calls (``prepare_image``,
    ``downscale_height_and_width``) are defined under other names.  Comments
    describe the apparent intent only; the code cannot run as-is.
    """

    def __init__( self : int , __lowercase : UNetaDConditionModel , __lowercase : DDPMScheduler , __lowercase : VQModel , ) -> str:
        super().__init__()
        # Register sub-models so the DiffusionPipeline machinery can move,
        # save and load them together.
        self.register_modules(
            unet=__lowercase , scheduler=__lowercase , movq=__lowercase , )
        # Spatial down-scaling factor of the MoVQ autoencoder.
        __UpperCAmelCase : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    # Presumably ``get_timesteps``: truncate the schedule according to the
    # img2img ``strength`` -- TODO confirm.
    def UpperCAmelCase ( self : List[Any] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Optional[int] ) -> Any:
        # get the original timestep using init_timestep
        __UpperCAmelCase : int = min(int(num_inference_steps * strength ) , __lowercase )
        __UpperCAmelCase : Optional[int] = max(num_inference_steps - init_timestep , 0 )
        __UpperCAmelCase : Tuple = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    # Presumably ``prepare_latents``: encode the init image with MoVQ and
    # add scheduler noise at the starting timestep.
    def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : List[Any] , __lowercase : int , __lowercase : Tuple=None ) -> List[str]:
        if not isinstance(__lowercase , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowercase )}""" )
        __UpperCAmelCase : List[Any] = image.to(device=__lowercase , dtype=__lowercase )
        __UpperCAmelCase : List[Any] = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Input already looks like a 4-channel latent: use it as-is.
            __UpperCAmelCase : str = image
        else:
            if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(__lowercase )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(__lowercase , __lowercase ):
                # One generator per batch element: encode each slice separately.
                __UpperCAmelCase : Union[str, Any] = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowercase )
                ]
                __UpperCAmelCase : List[str] = torch.cat(__lowercase , dim=0 )
            else:
                __UpperCAmelCase : Optional[Any] = self.movq.encode(__lowercase ).latent_dist.sample(__lowercase )
            __UpperCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
            __UpperCAmelCase : List[Any] = torch.cat([init_latents] , dim=0 )
        __UpperCAmelCase : Dict = init_latents.shape
        __UpperCAmelCase : int = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
        # get latents
        __UpperCAmelCase : Union[str, Any] = self.scheduler.add_noise(__lowercase , __lowercase , __lowercase )
        __UpperCAmelCase : Dict = init_latents
        return latents

    # Presumably ``enable_sequential_cpu_offload`` (accelerate cpu_offload).
    def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int=0 ) -> Dict:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        __UpperCAmelCase : int = torch.device(f"""cuda:{gpu_id}""" )
        __UpperCAmelCase : Tuple = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__lowercase , __lowercase )

    # Presumably ``enable_model_cpu_offload`` (accelerate >= 0.17 hooks).
    def UpperCAmelCase ( self : List[Any] , __lowercase : List[Any]=0 ) -> Union[str, Any]:
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        __UpperCAmelCase : Tuple = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=__lowercase )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        __UpperCAmelCase : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __UpperCAmelCase , __UpperCAmelCase : int = cpu_offload_with_hook(__lowercase , __lowercase , prev_module_hook=__lowercase )
        # We'll offload the last model manually.
        __UpperCAmelCase : List[Any] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase ( self : str ) -> Optional[int]:
        # Without accelerate hooks, the pipeline device is authoritative.
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__lowercase , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(__lowercase )
    def __call__( self : Tuple , __lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowercase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , __lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowercase : int = 512 , __lowercase : int = 512 , __lowercase : int = 100 , __lowercase : float = 4.0 , __lowercase : float = 0.3 , __lowercase : int = 1 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , ) -> Dict:
        __UpperCAmelCase : str = self._execution_device
        # Classifier-free guidance is active whenever guidance_scale > 1.
        __UpperCAmelCase : Optional[Any] = guidance_scale > 1.0
        if isinstance(__lowercase , __lowercase ):
            __UpperCAmelCase : Tuple = torch.cat(__lowercase , dim=0 )
        __UpperCAmelCase : List[Any] = image_embeds.shape[0]
        if isinstance(__lowercase , __lowercase ):
            __UpperCAmelCase : List[str] = torch.cat(__lowercase , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeds per requested image and stack [uncond, cond].
            __UpperCAmelCase : List[Any] = image_embeds.repeat_interleave(__lowercase , dim=0 )
            __UpperCAmelCase : List[str] = negative_image_embeds.repeat_interleave(__lowercase , dim=0 )
            __UpperCAmelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowercase )
        if not isinstance(__lowercase , __lowercase ):
            __UpperCAmelCase : Dict = [image]
        if not all(isinstance(__lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(__lowercase ) for i in image]}. Currently, we only support  PIL image and pytorch tensor""" )
        __UpperCAmelCase : str = torch.cat([prepare_image(__lowercase , __lowercase , __lowercase ) for i in image] , dim=0 )
        __UpperCAmelCase : str = image.to(dtype=image_embeds.dtype , device=__lowercase )
        # Encode the init image into MoVQ latent space.
        __UpperCAmelCase : List[str] = self.movq.encode(__lowercase )["""latents"""]
        __UpperCAmelCase : Any = latents.repeat_interleave(__lowercase , dim=0 )
        self.scheduler.set_timesteps(__lowercase , device=__lowercase )
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.get_timesteps(__lowercase , __lowercase , __lowercase )
        __UpperCAmelCase : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        __UpperCAmelCase , __UpperCAmelCase : Dict = downscale_height_and_width(__lowercase , __lowercase , self.movq_scale_factor )
        __UpperCAmelCase : Union[str, Any] = self.prepare_latents(
            __lowercase , __lowercase , __lowercase , __lowercase , image_embeds.dtype , __lowercase , __lowercase )
        for i, t in enumerate(self.progress_bar(__lowercase ) ):
            # expand the latents if we are doing classifier free guidance
            __UpperCAmelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __UpperCAmelCase : Dict = {"""image_embeds""": image_embeds}
            __UpperCAmelCase : Dict = self.unet(
                sample=__lowercase , timestep=__lowercase , encoder_hidden_states=__lowercase , added_cond_kwargs=__lowercase , return_dict=__lowercase , )[0]
            if do_classifier_free_guidance:
                # Split off the (optional) learned-variance channels, then mix
                # unconditional and conditional predictions.
                __UpperCAmelCase , __UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                __UpperCAmelCase , __UpperCAmelCase : str = noise_pred.chunk(2 )
                __UpperCAmelCase , __UpperCAmelCase : List[Any] = variance_pred.chunk(2 )
                __UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                __UpperCAmelCase , __UpperCAmelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __UpperCAmelCase : Optional[int] = self.scheduler.step(
                __lowercase , __lowercase , __lowercase , generator=__lowercase , )[0]
        # post-processing
        __UpperCAmelCase : List[str] = self.movq.decode(__lowercase , force_not_quantize=__lowercase )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] back to [0, 1] and move to HWC numpy layout.
            __UpperCAmelCase : Any = image * 0.5 + 0.5
            __UpperCAmelCase : str = image.clamp(0 , 1 )
            __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            if output_type == "pil":
                __UpperCAmelCase : List[str] = self.numpy_to_pil(__lowercase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowercase )
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import wiring for the GPT-Sw3 tokenizer.
# Bug fix: the structure dict and the sys.modules patch bound the throwaway
# names ``a``; ``_import_structure`` (read by _LazyModule) is restored, and
# the TYPE_CHECKING import path/name are aligned with the declared
# "GPTSw3Tokenizer" (was ``tokenization_gpt_swa`` / ``GPTSwaTokenizer``).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
class a :
    """Binary-indexed (Fenwick-style) tree specialised for maximum queries
    over ``arr``.

    NOTE(review): names are machine-garbled -- every assignment target is
    ``__UpperCAmelCase`` while later lines read ``index`` / ``result`` /
    ``current_left_border`` etc., and all methods share one name (so later
    defs shadow earlier ones).  The comments describe apparent intent only;
    the code cannot run as-is.
    """

    def __init__( self : Optional[int] , __lowercase : int ) -> None:
        # Presumably binds self.size, self.arr (raw values) and self.tree
        # (max aggregates) -- the query method below reads those attributes.
        __UpperCAmelCase : Any = size
        __UpperCAmelCase : Dict = [0] * size
        __UpperCAmelCase : Union[str, Any] = [0] * size

    # Presumably ``get_next``: index of the next tree node covering ``index``.
    @staticmethod
    def UpperCAmelCase ( __lowercase : int ) -> int:
        return index | (index + 1)

    # Presumably ``get_prev``: rightmost index of the preceding covered block.
    @staticmethod
    def UpperCAmelCase ( __lowercase : int ) -> int:
        return (index & (index + 1)) - 1

    # Presumably ``update``: point-assign a value and refresh the max tree.
    def UpperCAmelCase ( self : Any , __lowercase : int , __lowercase : int ) -> None:
        __UpperCAmelCase : Dict = value
        while index < self.size:
            __UpperCAmelCase : Optional[Any] = self.get_prev(__lowercase ) + 1
            if current_left_border == index:
                __UpperCAmelCase : List[Any] = value
            else:
                __UpperCAmelCase : List[str] = max(__lowercase , __lowercase , __lowercase )
            __UpperCAmelCase : Any = self.get_next(__lowercase )

    # Presumably ``query``: maximum over the half-open range [left, right).
    def UpperCAmelCase ( self : Any , __lowercase : int , __lowercase : int ) -> int:
        right -= 1 # Because of right is exclusive
        __UpperCAmelCase : str = 0
        while left <= right:
            __UpperCAmelCase : str = self.get_prev(__lowercase )
            if left <= current_left:
                # Whole tree block inside the range: take its aggregate.
                __UpperCAmelCase : str = max(__lowercase , self.tree[right] )
                __UpperCAmelCase : Optional[Any] = current_left
            else:
                # Partial block: fall back to the raw array element.
                __UpperCAmelCase : List[Any] = max(__lowercase , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module when executed directly.
    doctest.testmod()
| 63 |
def lowerCamelCase__(num: int) -> list:
    """Return all primes up to and including *num* via the Sieve of
    Eratosthenes.

    Bug fix: assignments bound ``__UpperCAmelCase`` while later lines read
    ``primes`` / ``p``, and the interactive guard called an undefined name;
    the real names are restored.

    Args:
        num: upper bound (inclusive) of the sieve.

    Returns:
        Sorted list of all primes <= num.

    Raises:
        ValueError: if num is not a positive integer.
    """
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    # Bug fix: the guard previously bound ``a`` and called the undefined
    # name ``prime_sieve_eratosthenes``.
    print(lowerCamelCase__(user_num))
| 63 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase__(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint to a PyTorch ``state_dict``.

    Bug fix: assignments bound ``__UpperCAmelCase`` while later lines read
    ``config`` and ``model``, and every call passed the undefined name
    ``__lowerCamelCase``; the real names are restored.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        albert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: destination file for the converted weights.
    """
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to the throwaway name
    # ``a``, and the final call targeted the undefined name
    # ``convert_tf_checkpoint_to_pytorch`` (the converter in this file is
    # ``lowerCamelCase__``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    lowerCamelCase__(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: both module constants bound the throwaway name ``a``; ``logger``
# is read by the config class below (warning/info calls), and the archive
# map gets its conventional upstream name.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    Bug fix: the class was named ``a`` with undefined base ``lowercase__``
    while later code in this file instantiates ``GitVisionConfig`` and the
    file imports ``PretrainedConfig``; the ``__init__`` declared every
    parameter as ``__lowercase`` (a SyntaxError) and bound
    ``__UpperCAmelCase`` where the intended ``self.*`` attributes are read
    back in the body. Real names are restored.
    """

    # model_type is read by the from_pretrained cross-check below.
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the vision config, unwrapping it from a full GIT config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for a GIT model: text decoder plus nested vision config.

    Bug fix: the class was named ``a`` with undefined base ``lowercase__``;
    the ``__init__`` declared every parameter as ``__lowercase`` (a
    SyntaxError) and bound ``__UpperCAmelCase`` where ``self.*`` attributes
    were intended (e.g. ``self.vision_config`` is read by ``to_dict``).
    Real names are restored.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, inlining the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__():
    """Entry point of the ``transformers-cli`` tool: build the argument
    parser, register all sub-commands, dispatch to the selected one.

    Bug fix: assignments bound ``__UpperCAmelCase`` while later lines read
    ``parser`` and ``service``, and every ``register_subcommand`` call passed
    the undefined name ``__lowerCamelCase``; the real names are restored.
    """
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func""" ):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name ``main``.
    lowerCamelCase__()
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
    """Test-suite for the (m)BARThez SentencePiece tokenizer (slow and fast
    variants), downloaded from the ``moussaKam/mbarthez`` checkpoint.

    NOTE(review): this class is machine-mangled — every method is named
    ``UpperCAmelCase`` (so later defs shadow earlier ones), locals are all
    bound to ``__UpperCAmelCase`` and then read back via other names
    (``tokenizer``, ``vocab_keys``, ``batch``, ...) or via the undefined
    ``__lowercase``; presumably ``lowercase__`` is the mangled
    ``TokenizerTesterMixin`` — confirm against upstream transformers.
    """

    # Harness configuration. NOTE(review): all four assignments rebind the
    # single name `a`, so only the last survives; upstream these are
    # tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    # test_sentencepiece — TODO confirm.
    a : Optional[Any] = BarthezTokenizer
    a : Any = BarthezTokenizerFast
    a : Union[str, Any] = True
    a : Union[str, Any] = True

    def UpperCAmelCase ( self : Dict ) -> Any:
        # setUp: fetch the pretrained fast tokenizer once and save it into the
        # harness temp dir (both legacy and new serialization formats).
        super().setUp()
        __UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        # NOTE(review): `tokenizer` and `__lowercase` below are not defined in
        # this scope — mangled references to the local created above / `False`.
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
        __UpperCAmelCase : str = tokenizer

    def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Round-trip a special token: "<pad>" should map to id 1 and back.
        __UpperCAmelCase : Dict = """<pad>"""
        __UpperCAmelCase : Optional[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )

    def UpperCAmelCase ( self : List[Any] ) -> str:
        # Check the first/last vocab entries and the total vocab size.
        __UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(__lowercase ) , 101122 )

    def UpperCAmelCase ( self : Any ) -> List[Any]:
        # vocab_size property must agree with the vocabulary length above.
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )

    @require_torch
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # Batch tokenization to PyTorch tensors: shapes and first-row ids.
        __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
        __UpperCAmelCase : List[Any] = self.tokenizer(
            __lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        __UpperCAmelCase : int = batch.input_ids.tolist()[0]
        self.assertListEqual(__lowercase , __lowercase )

    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        # Slow and fast tokenizers must agree on tokenize() and encode().
        if not self.test_rust_tokenizer:
            return
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
        __UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
        __UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
        __UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
        __UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __UpperCAmelCase : Dict = self.get_rust_tokenizer()
        __UpperCAmelCase : str = tokenizer.encode(__lowercase )
        __UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        # Integration test: pinned encoding for two French sentences against a
        # fixed revision of the moussaKam/mbarthez checkpoint.
        # fmt: off
        __UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        __UpperCAmelCase : int = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a ( ProcessorMixin ):
    """Chinese-CLIP processor: wraps a ChineseCLIPImageProcessor and a BERT
    tokenizer into a single callable that prepares text and/or image inputs.

    Fixes over the previous version:
    - ``__init__`` and ``__call__`` declared several parameters all named
      ``__lowercase`` (a SyntaxError); real parameter names are restored.
    - In ``__call__`` the image pixel values were assigned to a throwaway
      local instead of being attached to the text encoding.
    - ``warnings.warn`` was passed an undefined name as the warning category;
      ``FutureWarning`` is used, matching the deprecation message.
    - The three class attributes all rebound the name ``a`` (only the last
      survived); they are restored to the names the ``ProcessorMixin``
      machinery reads, and the base class to the ``ProcessorMixin`` imported
      above instead of the undefined ``lowercase__``.
    """

    # Attribute names required by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        """Store the image processor and tokenizer.

        Accepts the deprecated ``feature_extractor`` kwarg as a fallback for
        ``image_processor``. Raises ValueError if either component is missing.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        # Default processor used by helpers that need a single component.
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns the tokenizer encoding (with ``pixel_values`` attached when
        images are also given), or a BatchEncoding of image features when only
        images are given. Raises ValueError when both are None.
        """
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Bug fix: attach the pixel values to the text encoding instead of
            # discarding them into an unused local.
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        # Forwarded to the tokenizer's batch_decode; see its docstring.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        # Forwarded to the tokenizer's decode; see its docstring.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Order-preserving, de-duplicated union of both components' input names.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
| 63 |
from __future__ import annotations
import math
def lowerCamelCase__ ( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ):
    """Return the optimal minimax value of a complete binary game tree.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the current player maximizes, False when minimizing.
        scores: leaf values; leaf ``node_index`` reads ``scores[node_index]``.
        height: depth at which the leaves sit (``math.log(len(scores), 2)``).

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.

    Bug fixes: all five parameters previously shared the single name
    ``__lowerCamelCase`` (a SyntaxError), the recursion targeted the undefined
    name ``minimax``, and the player was never alternated between levels.
    """
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    if is_max:
        # Maximizing player: best of the two children, which minimize.
        return max(
            lowerCamelCase__(depth + 1 , node_index * 2 , False , scores , height ) , lowerCamelCase__(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    # Minimizing player: worst of the two children, which maximize.
    return min(
        lowerCamelCase__(depth + 1 , node_index * 2 , True , scores , height ) , lowerCamelCase__(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


# The rest of this module refers to the function as `minimax`; expose that alias.
minimax = lowerCamelCase__
def lowerCamelCase__ ( ):
    """Demo driver: print the optimal minimax value for a sample score list.

    Bug fix: the call previously passed the undefined name ``__lowerCamelCase``
    for the is_max/scores/height arguments; it now starts a maximizing root at
    depth 0 with the computed tree height.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the demo driver above is named `lowerCamelCase__`;
    # `main` is not defined in this module.
    lowerCamelCase__()
| 63 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase__ ( ):
    """Build a tiny in-memory `datasets.Dataset` fixture with three fake repo
    files; the first two contents are near-duplicates ("a " repeated), the
    third is distinct.

    Bug fix: `Dataset.from_dict` previously received the undefined name
    ``__lowerCamelCase`` instead of the dict that was just built.
    """
    data_dict = {
        """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
        """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
        """content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class a ( TestCase ):
    """Unit tests for the MinHash de-duplication helpers.

    Fixes over the previous version: the base class was the undefined name
    ``lowercase__`` (``TestCase`` is imported above); both methods were named
    ``UpperCAmelCase`` so the second shadowed the first (unittest also needs
    the ``test_`` prefix to discover them); and the helpers received the
    undefined name ``__lowercase`` instead of the dataset fixture / ``True``.
    """

    def test_multiple_files( self ) -> None:
        # Two near-duplicate files must land in one cluster of size 2.
        ds = lowerCamelCase__()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset( self ) -> None:
        # Deduplication keeps one representative of the duplicate pair plus
        # the distinct file, and marks the cluster's canonical entry.
        ds = lowerCamelCase__()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , True )
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
a : Tuple = logging.get_logger(__name__)
# Map from canonical checkpoint name to its hosted configuration file.
# NOTE(review): both constants are bound to the mangled name `a` (the second
# rebinds the first); upstream these are `logger` and the pretrained config
# archive map — confirm against upstream transformers.
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( PretrainedConfig ):
    """Configuration for the original OpenAI GPT model: stores vocabulary and
    architecture sizes, dropout rates, and sequence-summary options.

    Fixes over the previous version:
    - ``__init__`` declared sixteen parameters all named ``__lowercase``
      (a SyntaxError); the real parameter names, matching the attributes the
      body assigns, are restored with the same default values.
    - The two class attributes rebound the single name ``a``; they are
      restored to ``model_type``/``attribute_map``, the names the
      ``PretrainedConfig`` machinery reads.
    - The base class was the undefined name ``lowercase__``;
      ``PretrainedConfig`` is imported at the top of this module.
    """

    model_type = 'openai-gpt'
    # Maps the generic config attribute names onto this model's field names.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self ,
        vocab_size=40478 ,            # size of the BPE vocabulary
        n_positions=512 ,             # maximum sequence length
        n_embd=768 ,                  # embedding / hidden size
        n_layer=12 ,                  # number of transformer layers
        n_head=12 ,                   # attention heads per layer
        afn="gelu" ,                  # activation function name
        resid_pdrop=0.1 ,             # residual dropout
        embd_pdrop=0.1 ,              # embedding dropout
        attn_pdrop=0.1 ,              # attention dropout
        layer_norm_epsilon=1e-5 ,
        initializer_range=0.02 ,
        summary_type="cls_index" ,    # sequence-summary options below
        summary_use_proj=True ,
        summary_activation=None ,
        summary_proj_to_labels=True ,
        summary_first_dropout=0.1 ,
        **kwargs ,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 63 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( array: list[int] ):  # This function is recursive
    """Return a longest non-decreasing subsequence of ``array``.

    Recursive strategy: either the best subsequence starts at the pivot
    (``array[0]``), or it starts at the first element smaller than the pivot;
    both candidates are solved recursively and the longer one is returned.

    Bug fixes: the recursion previously targeted the undefined name
    ``longest_subsequence``, and the mangled locals compared a name with
    itself (``len(__lowerCamelCase) > len(__lowerCamelCase)``); real local
    names are restored from the algorithm's structure.
    """
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # Look for the first element smaller than the pivot: the best subsequence
    # may start there instead of at the pivot.
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = lowerCamelCase__(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    # Candidate that keeps the pivot as the first element.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *lowerCamelCase__(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
    import doctest

    # Run any examples embedded in this module's docstrings.
    doctest.testmod()
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast (dummy-sized) pipeline tests for ``KandinskyVaaInpaintPipeline``.

    NOTE(review): this class is machine-mangled — every property/method is
    named ``UpperCAmelCase`` (later defs shadow earlier ones), locals are all
    bound to ``__UpperCAmelCase`` and then read back under other names
    (``model``, ``unet``, ``components``, ...) or via the undefined
    ``__lowercase``. Presumably ``lowercase__`` is the mangled
    ``PipelineTesterMixin`` — confirm against upstream diffusers.
    """

    # Harness configuration. NOTE(review): all five assignments rebind the
    # single name `a`, so only the last survives; upstream these look like
    # pipeline_class / params / batch_params / required_optional_params /
    # test_xformers_attention — TODO confirm.
    a : int = KandinskyVaaInpaintPipeline
    a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    a : Any = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    a : Any = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    a : List[Any] = False

    @property
    def UpperCAmelCase ( self : int ) -> Dict:
        # Dummy text-embedder hidden size.
        return 32

    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        # Dummy time-input dimension.
        return 32

    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # Mirrors time_input_dim (32).
        return self.time_input_dim

    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        # Time-embedding dimension: 4x the time input dim.
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Dummy cross-attention dimension.
        return 100

    @property
    def UpperCAmelCase ( self : Dict ) -> Any:
        # Deterministic dummy UNet; 9 input channels = 4 latent + 4 masked-image
        # latent + 1 mask channel for inpainting.
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        # NOTE(review): `__lowercase` / `model` are mangled references to the
        # config dict above and the constructed UNet.
        __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
        return model

    @property
    def UpperCAmelCase ( self : int ) -> int:
        # Keyword arguments for the dummy VQ (movq) model.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        # Deterministic dummy movq (VQ image decoder).
        torch.manual_seed(0 )
        __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def UpperCAmelCase ( self : Any ) -> List[Any]:
        # Assemble the pipeline components: dummy UNet, DDIM scheduler, movq.
        __UpperCAmelCase : List[str] = self.dummy_unet
        __UpperCAmelCase : List[str] = self.dummy_movq
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
        __UpperCAmelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
        # Build seeded dummy call inputs: image/negative embeddings, a 64x64
        # init image, and a mask with a single zeroed entry.
        __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowercase )
        # create init_image
        __UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
        __UpperCAmelCase : List[str] = 0
        # mps has no device-side generator; fall back to a CPU-seeded one.
        if str(__lowercase ).startswith("""mps""" ):
            __UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
        else:
            __UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __UpperCAmelCase : Optional[Any] = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # End-to-end fast test on CPU: the output image slice must match a
        # pinned reference, and the tuple-return path must agree.
        __UpperCAmelCase : Optional[Any] = """cpu"""
        __UpperCAmelCase : Dict = self.get_dummy_components()
        __UpperCAmelCase : str = self.pipeline_class(**__lowercase )
        __UpperCAmelCase : Tuple = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
        __UpperCAmelCase : Tuple = output.images
        __UpperCAmelCase : Optional[int] = pipe(
            **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
        __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase : Optional[Any] = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Batched single-image inference must match unbatched within 3e-3.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """GPU integration test for Kandinsky 2.2 inpainting: runs the real prior
    + decoder checkpoints and compares against a stored reference image.

    NOTE(review): like the fast-test class above, names here are mangled —
    both methods are named ``UpperCAmelCase`` and locals bound to
    ``__UpperCAmelCase`` are read back via other names (``prompt``, ``mask``,
    ``pipeline``, ...) or the undefined ``__lowercase``.
    """

    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Load the pinned expected output and the input cat image.
        __UpperCAmelCase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __UpperCAmelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Mask with a single zeroed row (the region to inpaint with "a hat").
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
        __UpperCAmelCase : Optional[Any] = 0
        __UpperCAmelCase : Tuple = """a hat"""
        # fp16 prior produces the image embeddings from the prompt.
        __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __UpperCAmelCase : int = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # Fixed CPU seed for reproducibility against the stored reference.
        __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
            __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __UpperCAmelCase : Optional[int] = pipeline(
            image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __UpperCAmelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a ( lowercase__ ):
    """Model tester for DistilBERT: builds small configs/inputs and runs one
    create-and-check routine per task head.

    NOTE(review): machine-mangled class — all check methods are named
    ``UpperCAmelCase`` (later defs shadow earlier ones) and locals bound to
    ``__UpperCAmelCase`` are read back under other names (``result``,
    ``model``, ...) or via the undefined ``__lowercase``. Presumably
    ``lowercase__`` is the mangled ``TestCase``/helper base — confirm.
    """

    def __init__( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : List[Any]=13 , __lowercase : List[str]=7 , __lowercase : Any=True , __lowercase : Tuple=True , __lowercase : List[Any]=False , __lowercase : Any=True , __lowercase : List[str]=99 , __lowercase : Any=32 , __lowercase : Optional[Any]=5 , __lowercase : Dict=4 , __lowercase : Optional[Any]=37 , __lowercase : int="gelu" , __lowercase : Optional[int]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : Tuple=512 , __lowercase : str=16 , __lowercase : Dict=2 , __lowercase : Any=0.02 , __lowercase : Optional[Any]=3 , __lowercase : Any=4 , __lowercase : Optional[Any]=None , ) -> List[Any]:
        # NOTE(review): the parameters above all share the name `__lowercase`
        # (duplicate-argument SyntaxError); the attribute assignments below show
        # the intended names (parent, batch_size, seq_length, ...).
        __UpperCAmelCase : str = parent
        __UpperCAmelCase : Optional[Any] = batch_size
        __UpperCAmelCase : Tuple = seq_length
        __UpperCAmelCase : Union[str, Any] = is_training
        __UpperCAmelCase : Any = use_input_mask
        __UpperCAmelCase : Optional[int] = use_token_type_ids
        __UpperCAmelCase : Optional[int] = use_labels
        __UpperCAmelCase : Optional[Any] = vocab_size
        __UpperCAmelCase : Dict = hidden_size
        __UpperCAmelCase : Optional[int] = num_hidden_layers
        __UpperCAmelCase : int = num_attention_heads
        __UpperCAmelCase : int = intermediate_size
        __UpperCAmelCase : Optional[int] = hidden_act
        __UpperCAmelCase : List[Any] = hidden_dropout_prob
        __UpperCAmelCase : List[str] = attention_probs_dropout_prob
        __UpperCAmelCase : List[str] = max_position_embeddings
        __UpperCAmelCase : List[str] = type_vocab_size
        __UpperCAmelCase : Tuple = type_sequence_label_size
        __UpperCAmelCase : int = initializer_range
        __UpperCAmelCase : Optional[int] = num_labels
        __UpperCAmelCase : Any = num_choices
        __UpperCAmelCase : Tuple = scope

    def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        # Build random input ids, optional attention mask, and label tensors.
        __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase : str = None
        if self.use_input_mask:
            __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCAmelCase : Optional[Any] = None
        __UpperCAmelCase : str = None
        __UpperCAmelCase : Optional[Any] = None
        if self.use_labels:
            __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCAmelCase : Any = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase ( self : Any ) -> int:
        # Small DistilBERT config derived from the tester's dimensions.
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def UpperCAmelCase ( self : Any , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : str ) -> str:
        # Base model: output must be (batch, seq, hidden).
        __UpperCAmelCase : Any = DistilBertModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        __UpperCAmelCase : Any = model(__lowercase , __lowercase )
        __UpperCAmelCase : Union[str, Any] = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase ( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Dict ) -> int:
        # Masked-LM head: logits must be (batch, seq, vocab).
        __UpperCAmelCase : Union[str, Any] = DistilBertForMaskedLM(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        __UpperCAmelCase : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase ( self : str , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : int , __lowercase : Tuple , __lowercase : List[str] ) -> Tuple:
        # QA head: start/end logits must be (batch, seq).
        __UpperCAmelCase : Tuple = DistilBertForQuestionAnswering(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        __UpperCAmelCase : List[Any] = model(
            __lowercase , attention_mask=__lowercase , start_positions=__lowercase , end_positions=__lowercase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase ( self : Tuple , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : int ) -> str:
        # Sequence-classification head: logits must be (batch, num_labels).
        __UpperCAmelCase : Any = self.num_labels
        __UpperCAmelCase : List[str] = DistilBertForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        __UpperCAmelCase : Union[str, Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase ( self : Dict , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : str , __lowercase : Any , __lowercase : str ) -> int:
        # Token-classification head: logits must be (batch, seq, num_labels).
        __UpperCAmelCase : Optional[Any] = self.num_labels
        __UpperCAmelCase : Optional[int] = DistilBertForTokenClassification(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        __UpperCAmelCase : Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : List[str] , __lowercase : int ) -> str:
        # Multiple-choice head: inputs are tiled per choice; logits must be
        # (batch, num_choices).
        __UpperCAmelCase : Dict = self.num_choices
        __UpperCAmelCase : Dict = DistilBertForMultipleChoice(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        __UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __UpperCAmelCase : List[str] = model(
            __lowercase , attention_mask=__lowercase , labels=__lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Repackage prepare_config_and_inputs() for the common test helpers.
        __UpperCAmelCase : Any = self.prepare_config_and_inputs()
        ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : int = config_and_inputs
        __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Unit tests for all DistilBERT model heads, driven by the shared
    ModelTesterMixin/PipelineTesterMixin harness.

    NOTE(review): machine-mangled — the two ``lowercase__`` bases are
    presumably the mangled mixins, every test method is named
    ``UpperCAmelCase`` (later defs shadow earlier ones), and the five class
    attributes below rebind the single name `a` (upstream:
    all_model_classes, pipeline_model_mapping, fx_compatible,
    test_pruning, ...) — confirm against upstream transformers.
    """

    a : Optional[Any] = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    a : Union[str, Any] = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a : Tuple = True
    a : Tuple = True
    a : Any = True
    a : Optional[Any] = True

    def UpperCAmelCase ( self : List[str] ) -> str:
        # setUp: build the model tester and a config tester for DistilBertConfig.
        __UpperCAmelCase : Dict = DistilBertModelTester(self )
        __UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowercase , dim=37 )

    def UpperCAmelCase ( self : Any ) -> Optional[Any]:
        # Run the generic config round-trip tests.
        self.config_tester.run_common_tests()

    def UpperCAmelCase ( self : Any ) -> Any:
        __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*__lowercase )

    def UpperCAmelCase ( self : Dict ) -> Tuple:
        __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowercase )

    def UpperCAmelCase ( self : int ) -> Any:
        __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*__lowercase )

    def UpperCAmelCase ( self : Any ) -> List[Any]:
        __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowercase )

    def UpperCAmelCase ( self : Tuple ) -> int:
        __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*__lowercase )

    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowercase )

    @slow
    def UpperCAmelCase ( self : List[Any] ) -> Dict:
        # Smoke-test loading the first published checkpoint.
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : List[Any] = DistilBertModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    @slow
    @require_torch_gpu
    def UpperCAmelCase ( self : List[Any] ) -> int:
        # TorchScript round-trip on GPU: trace on CPU inputs, save, reload,
        # and run on the target device.
        __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            __UpperCAmelCase : List[str] = True
            __UpperCAmelCase : Dict = model_class(config=__lowercase )
            __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowercase , __lowercase )
            __UpperCAmelCase : Dict = torch.jit.trace(
                __lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__lowercase , os.path.join(__lowercase , """traced_model.pt""" ) )
                __UpperCAmelCase : int = torch.jit.load(os.path.join(__lowercase , """traced_model.pt""" ) , map_location=__lowercase )
                loaded(inputs_dict["""input_ids"""].to(__lowercase ) , inputs_dict["""attention_mask"""].to(__lowercase ) )
@require_torch
class a ( unittest.TestCase ):
    """Integration test: pinned hidden-state values for the pretrained
    ``distilbert-base-uncased`` checkpoint on a fixed input.

    NOTE(review): locals are bound to ``__UpperCAmelCase`` and read back via
    other names (``model``, ``output``) or the undefined ``__lowercase`` —
    mangled references; confirm against upstream transformers.
    """

    @slow
    def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        __UpperCAmelCase : Tuple = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        __UpperCAmelCase : Optional[int] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        __UpperCAmelCase : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        # Inference only: no gradients needed.
        with torch.no_grad():
            __UpperCAmelCase : List[str] = model(__lowercase , attention_mask=__lowercase )[0]
        __UpperCAmelCase : Dict = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , __lowercase )
        # Expected 3x3 slice of the hidden states, pinned to 4 decimal places.
        __UpperCAmelCase : Dict = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1e-4 ) )
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Optional torch import: the flag below gates torch-specific code paths.
try:
    import torch

    a : List[Any] = True
except ImportError:
    a : str = False
# Resolve the torch cache directory; fall back to the XDG cache layout when
# torch.hub._get_torch_home is unavailable.
try:
    from torch.hub import _get_torch_home

    a : List[Any] = _get_torch_home()
except ImportError:
    a : int = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
# NOTE(review): every assignment in this section targets the same name `a`
# (mechanical renaming damage) while later code reads names such as
# `torch_cache_home`, `PATH`, `default_cache_path`, `_torch_available`,
# `OBJECTS`, `ATTRIBUTES`, `CONFIG` — confirm against the upstream module.
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__(objs=None, attrs=None):
    """Load the Visual-Genome class and attribute label lists.

    Each file contains one label per line, optionally followed by a comma and
    aliases; only the first field is kept, lower-cased and stripped.

    Args:
        objs: path to the objects file (defaults to the module-level OBJECTS).
        attrs: path to the attributes file (defaults to ATTRIBUTES).

    Returns:
        (vg_classes, vg_attrs): two lists of label strings.
    """
    # Late-bind the module-level default paths so importing/defining this
    # function does not require OBJECTS/ATTRIBUTES to already exist (the
    # original evaluated them at def time and duplicated the parameter name).
    if objs is None:
        objs = OBJECTS
    if attrs is None:
        attrs = ATTRIBUTES
    vg_classes = []
    with open(objs) as f:
        for line in f.readlines():  # renamed from `object` — don't shadow the builtin
            vg_classes.append(line.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def lowerCamelCase__(fname):
    """Load a Detectron-style pickle checkpoint and return its "model" state
    dict with every numpy array converted to a torch.Tensor.

    Args:
        fname: path to a pickle file containing {"model": {name: ndarray|Tensor}}.

    Raises:
        AssertionError: if a value is neither an ndarray nor a torch.Tensor.
    """
    r = OrderedDict()
    with open(fname, "rb") as f:
        ckp = pkl.load(f)["model"]
    # Iterate over a snapshot of the keys because entries are popped in place.
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # Fix: the original checked isinstance(v, torch.tensor) —
            # torch.tensor is a factory function, not a type, so that call
            # raises TypeError. torch.Tensor is the class.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class a :
    """Nested attribute-style view over a configuration dictionary, with
    YAML/JSON (de)serialization and a `from_pretrained`-style loader.

    NOTE(review): throughout this class every local assignment targets
    `__UpperCAmelCase` and every parameter is named `__lowercase` (mechanical
    renaming damage — duplicate parameter names are a SyntaxError as written),
    while later lines read the originally intended names (`name`, `level`,
    `levels`, `pointer`, ...). Kept byte-identical; confirm against upstream.
    """

    # Class-level default for the materialized key/value mapping.
    a : Dict = {}

    def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
        # Recursively wrap nested dicts in Config instances and expose each
        # key as an attribute; the collected dict becomes `_pointer`.
        __UpperCAmelCase : List[str] = name
        __UpperCAmelCase : str = level
        __UpperCAmelCase : int = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            __UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
            __UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
            if isinstance(__lowercase , __lowercase ):
                __UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
            __UpperCAmelCase : Union[str, Any] = v
            setattr(self , __lowercase , __lowercase )
        __UpperCAmelCase : Any = d

    def __repr__( self : Optional[Any] ) -> Optional[int]:
        # Show only the top-level keys.
        return str(list((self._pointer.keys()) ) )

    def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
        # Dotted keys ("a.b.c") are routed down into nested Config instances
        # and mirrored into the backing `_pointer` mapping.
        __UpperCAmelCase : int = val
        __UpperCAmelCase : List[str] = val
        __UpperCAmelCase : Union[str, Any] = key.split(""".""" )
        __UpperCAmelCase : List[Any] = len(__lowercase ) - 1
        __UpperCAmelCase : List[Any] = self._pointer
        if len(__lowercase ) > 1:
            for i, l in enumerate(__lowercase ):
                if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
                    setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
                if l == last_level:
                    __UpperCAmelCase : Union[str, Any] = val
                else:
                    __UpperCAmelCase : Union[str, Any] = pointer[l]

    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        # Accessor for the raw dict behind this Config.
        return self._pointer

    def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
        # Serialize `data` to `file_name` as YAML.
        with open(f"""{file_name}""" , """w""" ) as stream:
            dump(__lowercase , __lowercase )

    def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
        # Serialize `data` to `file_name` as JSON.
        with open(f"""{file_name}""" , """w""" ) as stream:
            json.dump(__lowercase , __lowercase )

    @staticmethod
    def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
        # Parse a YAML config file and return the raw data structure.
        with open(__lowercase ) as stream:
            __UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
        return data

    def __str__( self : List[str] ) -> Tuple:
        # Pretty-print the tree, indenting one space per nesting level.
        __UpperCAmelCase : Any = """ """
        if self._name != "root":
            __UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            __UpperCAmelCase : List[Any] = """"""
        __UpperCAmelCase : Optional[Any] = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(__lowercase , __lowercase ):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
            __UpperCAmelCase : int = level
        return r[:-1]

    @classmethod
    def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
        # Build a Config straight from a model name/path (see get_config_dict).
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
        return cls(__lowercase )

    @classmethod
    def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
        # Resolve `pretrained_model_name_or_path` to a local YAML file —
        # directory, file, remote URL, or hub bucket — download/cache it, and
        # return (parsed yaml, remaining kwargs).
        __UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
        __UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
        __UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
        __UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
        if os.path.isdir(__lowercase ):
            __UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
        elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
            __UpperCAmelCase : Tuple = pretrained_model_name_or_path
        else:
            __UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
        try:
            # Load from URL or cache if already cached
            __UpperCAmelCase : Optional[int] = cached_path(
                __lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
        except EnvironmentError:
            __UpperCAmelCase : str = """Can't load config for"""
            raise EnvironmentError(__lowercase )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
    # Debug helper: compare an in-memory tensor against the tensor saved in
    # "dump.pt" and report the element-wise mismatch percentage.
    # NOTE(review): local assignments target `__UpperCAmelCase` while later
    # lines read `in_tensor` / `out_tensor` / `na` (renaming damage), and the
    # final raise fires even on success — presumably a deliberate hard stop
    # used during debugging sessions; confirm against upstream.
    __UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
    __UpperCAmelCase : Tuple = in_tensor.numpy()
    __UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(na.shape , na[0, 0, :5] )
    assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__(url_or_filename):
    """Return True iff the argument is an http(s) URL (as opposed to a local path)."""
    # Fix: the parse result was assigned to a damaged local name while the
    # return statement read `parsed`.
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def lowerCamelCase__(model_id, filename, use_cdn=True):
    """Build the download URL for `filename` of `model_id` on the HF bucket/CDN.

    Legacy (no "/" in the id) checkpoints live at "{endpoint}/{id}-{filename}",
    namespaced ones at "{endpoint}/{id}/{filename}".
    """
    # Fixes: the endpoint/legacy locals were assigned to a damaged name, both
    # parameters shared one name, and the f-strings contained a "(unknown)"
    # placeholder where {filename} belongs.
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
    # Stream-download a URL into an open file object with a tqdm progress bar,
    # sending a descriptive User-Agent and an HTTP Range header when resuming.
    # NOTE(review): parameters all share one damaged name and assignments
    # target `__UpperCAmelCase` while later lines read `ua`, `headers`,
    # `response`, `content_length`, `progress` — confirm against upstream.
    __UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
    elif isinstance(__lowerCamelCase , __lowerCamelCase ):
        ua += "; " + user_agent
    __UpperCAmelCase : List[str] = {"""user-agent""": ua}
    if resume_size > 0:
        # Range header: ask the server to resume from the already-downloaded size.
        __UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
    __UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
    if response.status_code == 416: # Range not satisfiable
        return
    __UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
    __UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
    __UpperCAmelCase : List[Any] = tqdm(
        unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(__lowerCamelCase ) )
            temp_file.write(__lowerCamelCase )
    progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
    # Download a URL into the local cache (keyed by sha of url+etag) and return
    # the cached path; supports resume, offline lookup and per-file locking.
    # NOTE(review): parameters/locals carry damaged names throughout (see the
    # `__UpperCAmelCase` assignments vs. the names actually read later);
    # structure kept byte-identical — confirm against upstream utils.py.
    if cache_dir is None:
        __UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        __UpperCAmelCase : List[str] = str(__lowerCamelCase )
    os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
    __UpperCAmelCase : List[Any] = None
    if not local_files_only:
        try:
            # HEAD request to learn the ETag (also validates the URL is live).
            __UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
            if response.status_code == 200:
                __UpperCAmelCase : Dict = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    __UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
    # get cache path to put the file
    __UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(__lowerCamelCase ):
            return cache_path
        else:
            __UpperCAmelCase : List[Any] = [
                file
                for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(__lowerCamelCase ) > 0:
                return os.path.join(__lowerCamelCase , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(__lowerCamelCase ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    __UpperCAmelCase : str = cache_path + """.lock"""
    with FileLock(__lowerCamelCase ):
        # If the download just completed while the lock was activated.
        if os.path.exists(__lowerCamelCase ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            __UpperCAmelCase : int = cache_path + """.incomplete"""

            @contextmanager
            def _resumable_file_manager():
                # Append mode so a partially downloaded file is continued.
                with open(__lowerCamelCase , """a+b""" ) as f:
                    yield f

            __UpperCAmelCase : str = _resumable_file_manager
            if os.path.exists(__lowerCamelCase ):
                __UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
            else:
                __UpperCAmelCase : List[Any] = 0
        else:
            __UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
            __UpperCAmelCase : Optional[int] = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
            http_get(
                __lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
        os.replace(temp_file.name , __lowerCamelCase )
        # Sidecar JSON records the source url and etag next to the cached file.
        __UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
        __UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
        with open(__lowerCamelCase , """w""" ) as meta_file:
            json.dump(__lowerCamelCase , __lowerCamelCase )
    return cache_path
def lowerCamelCase__(url, etag=None):
    """Build a deterministic cache filename for a URL.

    The name is sha256(url), with ".{sha256(etag)}" appended when an ETag is
    known, and the ".h5" suffix preserved so h5py can identify such files.
    """
    # Local import: the top-of-file `from hashlib import shaaaa` is a damaged
    # spelling of sha256 and would fail at import time.
    from hashlib import sha256

    url_bytes = url.encode("utf-8")
    filename = sha256(url_bytes).hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        filename += "." + sha256(etag_bytes).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
    # Resolve a URL or local path to a local file (downloading into the cache
    # when remote) and optionally extract zip/tar archives next to the cache.
    # NOTE(review): parameters/locals carry damaged names (assignments target
    # `__UpperCAmelCase`, later lines read `output_path` etc.); kept
    # byte-identical — confirm against upstream utils.py.
    if cache_dir is None:
        __UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        __UpperCAmelCase : Any = str(__lowerCamelCase )
    if isinstance(__lowerCamelCase , __lowerCamelCase ):
        __UpperCAmelCase : Tuple = str(__lowerCamelCase )
    if is_remote_url(__lowerCamelCase ):
        # URL, so get it from the cache (downloading if necessary)
        __UpperCAmelCase : Tuple = get_from_cache(
            __lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
    elif os.path.exists(__lowerCamelCase ):
        # File, and it exists.
        __UpperCAmelCase : Tuple = url_or_filename
    elif urlparse(__lowerCamelCase ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
    if extract_compressed_file:
        if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        __UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
        __UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
        __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
        if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        __UpperCAmelCase : str = output_path + """.lock"""
        with FileLock(__lowerCamelCase ):
            # Start from a clean extraction directory each time.
            shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
            os.makedirs(__lowerCamelCase )
            if is_zipfile(__lowerCamelCase ):
                with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
                    zip_file.extractall(__lowerCamelCase )
                    zip_file.close()
            elif tarfile.is_tarfile(__lowerCamelCase ):
                __UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
                tar_file.extractall(__lowerCamelCase )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
        return output_path_extracted
    return output_path
def lowerCamelCase__(query, delim=","):
    """Fetch data from a local file path or a URL.

    A local file's contents are eval'd; a remote response is parsed as JSON,
    falling back to eval / newline-splitting of the decoded body.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            # NOTE(security): eval of file contents — only use with trusted
            # sources (behavior kept from the original research code).
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # Fix: the original called requests.json() — the requests *module*
            # has no .json(); the response object carries it.
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                # NOTE(security): eval of remote content — trusted sources only.
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def lowerCamelCase__(url):
    """Download an image over HTTP and return it as a numpy array (H, W, C)."""
    # Fix: the response/array were previously assigned to a damaged local name
    # while the next line read `response`.
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def lowerCamelCase__(url):
    """Download (if needed) a Detectron pickle checkpoint from `url`, load its
    "model" dict, and return it converted to torch tensors.

    For every BatchNorm "running_var" entry, a zero "num_batches_tracked"
    companion tensor is added (PyTorch BN state dicts expect it).
    """
    # Fixes: locals were assigned to a damaged name while later lines read
    # `fn`, `weights`, `model`, `new`, `zero`.
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def lowerCamelCase__():
    """Print the absolute path of the demo notebook that sits next to this file."""
    # Fix: the original referenced an undefined name where `__file__` belongs.
    print(f"""{os.path.abspath(os.path.join(__file__, os.pardir))}/demo.ipynb""")
def lowerCamelCase__(im, input_format="RGB"):
    """Read an image from a path or URL and return it as an ndarray.

    OpenCV decodes to BGR; the array is converted to RGB and, when
    input_format == "RGB", the channel axis is reversed again to match the
    original code's behavior.
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        # NOTE(review): `get_image_from_url` is the upstream name of the URL
        # loader defined above (this file currently carries damaged names).
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__(images, batch=1):
    """Yield successive `batch`-sized slices of `images` (the last slice may be
    shorter). Returns a generator of slices."""
    # Fix: both parameters previously shared one damaged name, which is a
    # SyntaxError and left the body's references unresolvable.
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 63 | 1 |
from collections import deque
def lowerCamelCase__(g):
    """Return the strongly connected components of a directed graph (Tarjan).

    Args:
        g: adjacency list — g[v] is the list of successors of vertex v.

    Returns:
        List of components; each component lists its vertices in reverse
        discovery order, and components appear in reverse topological order
        of the condensation.
    """
    # Fixes: all locals were assigned to a damaged name (`__UpperCAmelCase`)
    # while the algorithm read `stack`, `on_stack`, `index_of`, `lowlink_of`,
    # `components` — restored from the names the body itself references.
    n_vertices = len(g)
    stack = deque()
    on_stack = [False for _ in range(n_vertices)]
    index_of = [-1 for _ in range(n_vertices)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = min(lowlink_of[w], lowlink_of[v])
            elif on_stack[w]:
                lowlink_of[v] = min(lowlink_of[w], lowlink_of[v])

        # v is the root of an SCC: pop the whole component off the stack.
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n_vertices):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def lowerCamelCase__(n_vertices, edges):
    """Build an adjacency list for `n_vertices` vertices from (u, v) edge pairs."""
    # Fixes: both parameters previously shared one damaged name (SyntaxError)
    # and the appended value was an unresolvable reference.
    g = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    # Smoke test on a 7-vertex example graph; expected SCCs are
    # [[5], [6], [4], [3, 2, 1, 0]].
    # NOTE(review): `create_graph` and `tarjan` are the upstream names of the
    # two functions above, which this file currently defines under damaged
    # names; likewise `source`/`target`/`edges`/`g` are the intended names of
    # the constants assigned to `a` here — confirm against upstream.
    a : str = 7
    a : Dict = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    a : Union[str, Any] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    a : Any = [(u, v) for u, v in zip(source, target)]
    a : int = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a(unittest.TestCase):
    """Builds a tiny random RobertaConfig plus matching dummy inputs for the
    Flax Roberta tests below (upstream name: FlaxRobertaModelTester).

    Fixes applied: the constructor's parameters all shared one damaged name
    (a SyntaxError) and the attribute assignments targeted a bogus local
    instead of ``self``, so later ``self.batch_size`` etc. lookups would fail.
    Method names are restored to the upstream ``prepare_config_and_inputs*``
    names, which the bodies themselves call via ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same inputs, packed into the dict layout the common mixin tests use."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: flips config.is_decoder and adds encoder tensors."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a(FlaxModelTesterMixin, unittest.TestCase):
    """Model-class sweep tests for the Flax Roberta family (upstream name:
    FlaxRobertaModelTest).

    Fixes applied: the base class was an undefined damaged name (restored to
    FlaxModelTesterMixin, imported above); the class attributes and the setUp
    method carried damaged names, so the mixin would never find
    ``all_model_classes`` and unittest would never build ``model_tester``.
    """

    test_head_masking = True  # NOTE(review): attribute name restored from upstream
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): FlaxRobertaModelTester is the upstream name of the
        # helper class defined above (currently damaged to `a`).
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Load every model class from the public checkpoint and run a 1x1 input."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 1 |
def lowerCamelCase__(power=1000):
    """Project Euler 16: return the sum of the decimal digits of 2**power.

    Args:
        power: non-negative exponent (defaults to 1000, the Euler problem).

    Returns:
        The digit sum as an int.
    """
    # Fixes: locals were assigned to a damaged name while the loop read
    # `n` and `r`, and the parameter name was unusable.
    n = 2**power
    r = 0
    # Peel off the least-significant digit until nothing is left.
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    # CLI entry: read an exponent from stdin and print its 2**n digit sum.
    # NOTE(review): `solution` is the upstream name of the function above
    # (currently damaged to `lowerCamelCase__`) — confirm against upstream.
    print(solution(int(str(input()).strip())))
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger (upstream name: `logger`; damaged to `a` here).
a : Optional[int] = logging.get_logger(__name__)
class a(Enum):
    """Names of the supported learning-rate schedules (upstream: SchedulerType).

    Fixes applied: the base class was an undefined damaged name (should be
    Enum, imported at the top of this section) and every member had been
    collapsed to the same name ``a``; member names are restored from upstream
    so lookups like ``SchedulerType.LINEAR`` resolve.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def lowerCamelCase__(optimizer: Optimizer, last_epoch: int = -1):
    """Constant schedule: keep the optimizer's initial learning rate forever.

    Args:
        optimizer: the wrapped optimizer.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A LambdaLR whose multiplier is 1 at every step.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def lowerCamelCase__(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Linear warmup from 0 up to the optimizer's initial LR, then constant.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: number of steps over which the LR ramps up.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A LambdaLR implementing the schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Ramp: step / warmup (max(1.0, ...) guards against warmup == 0).
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def lowerCamelCase__(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR multipliers driven by a rule string.

    `step_rules` looks like "1:10,0.1:20,0.01": multiplier 1 until step 10,
    0.1 until step 20, then the trailing value (0.01) forever after.

    Fixes applied: every local was assigned to a damaged name while the body
    read `rules_dict`, `rule_list`, `sorted_steps`, etc.; names restored.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # All entries but the last are "value:step" pairs; the last is the final
    # multiplier applied beyond the largest step boundary.
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def lowerCamelCase__(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup then linear decay down to 0 at `num_training_steps`.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps to ramp from 0 to the initial LR.
        num_training_steps: total steps; LR reaches 0 here (clamped below at 0).
        last_epoch: index of the last epoch when resuming training.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup then cosine decay to 0 over the remaining steps.

    With the default num_cycles=0.5 the multiplier follows half a cosine wave
    from 1 down to 0 between warmup and `num_training_steps`.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Linear warmup then cosine decay with `num_cycles` hard restarts.

    After warmup the multiplier repeatedly falls from 1 to 0 along a cosine,
    snapping back to 1 at each restart; it is exactly 0 from
    `num_training_steps` on.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo produces the hard restart within each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup then polynomial decay from the initial LR down to `lr_end`.

    Args:
        optimizer: the wrapped optimizer (its defaults["lr"] is the initial LR).
        num_warmup_steps: steps to ramp from 0 to the initial LR.
        num_training_steps: step at which the LR reaches `lr_end`.
        lr_end: final learning rate (must be below the initial LR).
        power: polynomial exponent (1.0 == linear decay).
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if lr_end is not smaller than the optimizer's initial LR.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table from schedule type to its factory function.
# NOTE(review): `SchedulerType` and the `get_*` factory names are the upstream
# identifiers of the enum/functions defined above, which this file currently
# defines under damaged names (`a`, `lowerCamelCase__`); likewise the table's
# own upstream name is `TYPE_TO_SCHEDULER_FUNCTION` — confirm against upstream.
a : int = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( name , optimizer , step_rules=None , num_warmup_steps=None , num_training_steps=None , num_cycles=1 , power=1.0 , last_epoch=-1 , ):
    """Unified scheduler factory: resolve ``name`` to a SchedulerType and dispatch.

    Args:
        name: scheduler name or ``SchedulerType`` member.
        optimizer: optimizer to schedule.
        step_rules: rule string, only for PIECEWISE_CONSTANT.
        num_warmup_steps: required by all warmup-based schedulers.
        num_training_steps: required by schedulers that decay over training.
        num_cycles: only for COSINE_WITH_RESTARTS.
        power: only for POLYNOMIAL.
        last_epoch: forwarded to the underlying scheduler.

    Raises:
        ValueError: when a required argument for the chosen scheduler is missing.
    """
    # NOTE(review): the mangled original declared every parameter with the same
    # name (a SyntaxError); names restored from the body's reads.
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 63 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# NOTE(review): the mangled original bound both the path and the parsed JSON to
# ``a`` while the next lines read the undefined names ``filename``/``bleu_data``
# (the test class below indexes ``bleu_data[pair]``) — restore those bindings.
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a ( unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): identifiers in this class are machine-mangled.  The two
    # helpers below are invoked as ``self.get_tokenizer`` / ``self.get_model``
    # in the eval method, which itself declares ``__lowercase`` twice (a
    # SyntaxError) while its body reads ``pair``, ``tokenizer``, ``model``,
    # ``batch`` and ``scores`` — none of which are ever bound.  The original
    # names (and the boolean values of truncation/skip_special_tokens/...)
    # must be restored from the pristine upstream test before this can run.
    def UpperCAmelCase ( self : int , __lowercase : List[Any] ) -> Tuple:
        # Loads the FSMT tokenizer for a "facebook/wmt19-<pair>" checkpoint.
        return FSMTTokenizer.from_pretrained(__lowercase )
    def UpperCAmelCase ( self : Any , __lowercase : Any ) -> Any:
        # Loads the seq2seq model onto the test device; halves precision on CUDA.
        # NOTE(review): the result is bound to a throwaway local, yet the next
        # lines read ``model`` — another mangling casualty.
        __UpperCAmelCase : Any = FSMTForConditionalGeneration.from_pretrained(__lowercase ).to(__lowercase )
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ["""en-ru""", 26.0],
            ["""ru-en""", 22.0],
            ["""en-de""", 22.0],
            ["""de-en""", 29.0],
        ] )
    @slow
    def UpperCAmelCase ( self : List[str] , __lowercase : List[Any] , __lowercase : Optional[int] ) -> List[str]:
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        __UpperCAmelCase : Dict = f"""facebook/wmt19-{pair}"""
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(__lowercase )
        __UpperCAmelCase : Any = self.get_model(__lowercase )
        __UpperCAmelCase : Optional[Any] = bleu_data[pair]["""src"""]
        __UpperCAmelCase : Optional[Any] = bleu_data[pair]["""tgt"""]
        __UpperCAmelCase : Dict = tokenizer(__lowercase , return_tensors="""pt""" , truncation=__lowercase , padding="""longest""" ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        __UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(
            __lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase )
        __UpperCAmelCase : Tuple = calculate_bleu(__lowercase , __lowercase )
        print(__lowercase )
        self.assertGreaterEqual(scores["""bleu"""] , __lowercase )
| 63 |
from math import pi, sqrt
def gamma ( num : float ) -> float:
    """Compute Γ(num) for positive integers and half-integers via Γ(x) = (x-1)·Γ(x-1).

    Raises:
        ValueError: if ``num`` <= 0 (math domain error).
        OverflowError: if ``num`` > 171.5 (float64 gamma overflow bound).
        NotImplementedError: if ``num`` is not an integer or a half-integer.
    """
    # NOTE(review): the mangled original was named ``lowerCamelCase__`` while its
    # own body recursed through ``gamma`` and tested ``num`` (never bound) —
    # both restored.  The half-integer base case is Γ(1/2) = √π; the original
    # returned sqrt(num), which is wrong for every half-integer input.
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 1_7_1.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase__ ( ):
    """Self-test for ``gamma`` on known exact values (Γ(1/2) = √π, Γ(1) = Γ(2) = 1)."""
    # NOTE(review): the original compared against ``sqrt(__lowerCamelCase)``, an
    # undefined name; the identity being checked is Γ(1/2) = √π.
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive demo: keep prompting until the user enters 0 (falsy, ends the
    # loop).  NOTE(review): the mangled original bound both the sentinel and the
    # user input to ``a`` while the loop condition and prints read ``num``.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"""gamma({num}) = {gamma(num)}""")
        print("\nEnter 0 to exit...")
| 63 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """Builds RegNet configs and dummy inputs, and runs shape checks for the model tests.

    NOTE(review): restored from mangled code — the class was named ``a`` but is
    instantiated below as ``RegNetModelTester(self)``; the ``__init__`` signature
    repeated one parameter name (a SyntaxError) and bound every value to a
    throwaway local, while the other methods read ``self.batch_size`` etc.
    Parameter names come from the right-hand sides the original body read.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=None,
        depths=None,
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        # Avoid shared mutable list defaults: materialize them per instance.
        if hidden_sizes is None:
            hidden_sizes = [10, 20, 30, 40]
        if depths is None:
            depths = [1, 1, 2, 1]
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )

    def prepare_config_and_inputs(self):
        # Random pixel batch plus (optionally) classification labels.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        # NOTE(review): the mangled line bound ``self.num_labels`` to a throwaway
        # local; propagating it onto the config is the conventional intent —
        # confirm against the pristine upstream test.
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): mangled ModelTesterMixin suite.  The two ``lowercase__``
    # bases are undefined (originally ModelTesterMixin / PipelineTesterMixin,
    # imported above), and the six class attributes were all bound to ``a`` —
    # their original flag names (test_pruning, has_attentions, ...) are lost
    # here and must be recovered from the pristine upstream test.
    a : List[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    a : Optional[int] = (
        {'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    a : List[Any] = False
    a : Optional[Any] = False
    a : Tuple = False
    a : Tuple = False
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        # Wires up the model tester and the config tester (setUp-style hook).
        # NOTE(review): results bound to throwaway locals; later methods read
        # ``self.model_tester`` / ``self.config_tester`` — confirm upstream.
        __UpperCAmelCase : int = RegNetModelTester(self )
        __UpperCAmelCase : Dict = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
    def UpperCAmelCase ( self : int ) -> Tuple:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def UpperCAmelCase ( self : Tuple ) -> int:
        return
    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def UpperCAmelCase ( self : List[str] ) -> Any:
        pass
    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def UpperCAmelCase ( self : Tuple ) -> Any:
        pass
    def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        # Checks the forward signature starts with pixel_values.
        __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Dict = model_class(__lowercase )
            __UpperCAmelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase : int = [*signature.parameters.keys()]
            __UpperCAmelCase : Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __lowercase )
    def UpperCAmelCase ( self : str ) -> List[Any]:
        __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        # Verifies norm-layer weight/bias initialization for every model class.
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Dict = model_class(config=__lowercase )
            for name, module in model.named_modules():
                if isinstance(__lowercase , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def UpperCAmelCase ( self : str ) -> str:
        # NOTE(review): the inner helper declares ``__lowercase`` three times —
        # a SyntaxError; originally (model_class, config, inputs_dict).
        def check_hidden_states_output(__lowercase : List[Any] , __lowercase : Union[str, Any] , __lowercase : List[str] ):
            __UpperCAmelCase : Dict = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Any = model(**self._prepare_for_class(__lowercase , __lowercase ) )
            __UpperCAmelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __UpperCAmelCase : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : Union[str, Any] = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                __UpperCAmelCase : str = layer_type
                __UpperCAmelCase : Optional[Any] = True
                check_hidden_states_output(__lowercase , __lowercase , __lowercase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                __UpperCAmelCase : Union[str, Any] = True
                check_hidden_states_output(__lowercase , __lowercase , __lowercase )
    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowercase )
    @slow
    def UpperCAmelCase ( self : int ) -> Any:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : List[str] = RegNetModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
def prepare_img ( ):
    """Load the COCO sample image used by the slow integration test."""
    # NOTE(review): restored the name — the integration test below calls
    # ``prepare_img()``, while the mangled original defined ``lowerCamelCase__``.
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class a ( unittest.TestCase ):
    """Slow integration check: a pretrained RegNet classifier must reproduce known logits on a fixture image."""

    @cached_property
    def default_image_processor(self):
        # NOTE(review): restored the property name — the test body reads
        # ``self.default_image_processor``; the mangled original named it
        # ``UpperCAmelCase``, leaving that read an AttributeError.
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCAmelCase ( self ):
        # NOTE(review): local names restored from their later uses
        # (``model(**inputs)``, ``outputs.logits``); the original test-method
        # name was destroyed by the mangling — confirm upstream.
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary-tree node: an int payload plus optional left/right children."""

    # NOTE(review): the mangled original named the class and all three fields
    # ``a`` (so the later fields clobbered each other); the names below are the
    # ones every traversal in this module reads (``root.data`` / ``root.left`` /
    # ``root.right``), constructed as ``Node(value)``.
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree ( ):
    """Build the small fixture tree used by ``main()``: 1 -> (2 -> (4, 5), 3)."""
    # NOTE(review): the mangled original bound five nodes to throwaway locals
    # and returned the undefined name ``tree`` (it is also called below as
    # ``make_tree()``).  The parent/child wiring was destroyed by the mangling;
    # the shape here is the conventional fixture — confirm against the expected
    # traversal output.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder ( root : Node | None ):
    """Pre-order traversal (root, left subtree, right subtree); [] for an empty tree."""
    # NOTE(review): the body recursed through ``preorder`` and read ``root``,
    # while the mangled def bound neither name — both restored.
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder ( root : Node | None ):
    """Post-order traversal (left subtree, right subtree, root); [] for an empty tree."""
    # NOTE(review): recursion target and ``root`` restored from the body's reads.
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder ( root : Node | None ):
    """In-order traversal (left subtree, root, right subtree); [] for an empty tree."""
    # NOTE(review): recursion target and ``root`` restored from the body's reads.
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height ( root : Node | None ):
    """Height of the tree in nodes: 0 for an empty tree, 1 for a single leaf."""
    # NOTE(review): recursion target and ``root`` restored from the body's reads.
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order ( root : Node | None ):
    """Breadth-first traversal: payloads level by level, left to right."""
    # NOTE(review): locals restored from the body's later reads (the mangled
    # version bound everything to one throwaway name, then read
    # ``process_queue``/``node``, which were never defined).
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right ( root : Node | None , level : int ):
    """Payloads of the nodes at ``level`` (root is level 1), collected left to right."""
    # NOTE(review): the mangled def repeated one parameter name (a SyntaxError);
    # the function name itself is grounded by the call sites in zigzag()/main().
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output
def get_nodes_from_right_to_left ( root : Node | None , level : int ):
    """Payloads of the nodes at ``level`` (root is level 1), collected right to left."""
    # NOTE(review): mirror of get_nodes_from_left_to_right; the mangled def
    # repeated one parameter name (a SyntaxError) — restored from the body.
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def zigzag ( root : Node | None ):
    """Zig-zag (spiral) level order: alternate left→right and right→left per level."""
    # NOTE(review): locals restored from the body's reads — the mangled version
    # tested ``flag`` and iterated up to ``height_tree`` without binding either.
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main ( ):  # Main function for testing.
    """Exercise every traversal on the fixture tree and print the results."""
    # NOTE(review): the fixture was bound to a throwaway local while every print
    # below read ``tree``; the function itself is called as ``main()`` at the
    # bottom of the module — both names restored.
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , """\n""" )
    print(f"""Height of Tree: {height(tree )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
    # Run the module doctests, then the demo traversal printout.
    import doctest
    doctest.testmod()
    main()
| 63 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): module-level fixtures restored from mangled bindings (all three
# were assigned to ``a``).  The test classes below read ``FR_CODE``/``ES_CODE``
# (language-code ids for fr/es), and setUp loads the sentencepiece sample; the
# path's original binding name is unrecoverable here, so the upstream
# convention ``SAMPLE_SP`` is used — confirm against the pristine test file.
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
    import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): mangled tokenizer suite.  The base ``lowercase__`` is
    # undefined (originally the TokenizerTesterMixin imported above), the three
    # class attributes were all bound to ``a`` (originals such as
    # ``tokenizer_class`` are lost), and method bodies bind results to a single
    # throwaway local while later lines read ``spm_model``/``vocab``/``save_dir``
    # /``tokenizer`` — none of which are ever defined.  Restore from upstream.
    a : str = SpeechaTextTokenizer
    a : Optional[Any] = False
    a : int = True
    def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        # setUp-style hook: builds a small vocab from the sentencepiece sample
        # and saves a tokenizer into the temp dir.
        super().setUp()
        __UpperCAmelCase : Any = sp.SentencePieceProcessor()
        spm_model.Load(__lowercase )
        __UpperCAmelCase : Tuple = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__lowercase ) )]
        __UpperCAmelCase : List[str] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __UpperCAmelCase : Dict = Path(self.tmpdirname )
        save_json(__lowercase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(__lowercase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
        __UpperCAmelCase : Tuple = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Checks <pad> round-trips through token<->id conversion (id 1).
        __UpperCAmelCase : Optional[int] = """<pad>"""
        __UpperCAmelCase : Union[str, Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
    def UpperCAmelCase ( self : str ) -> Any:
        __UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(__lowercase ) , 1001 )
    def UpperCAmelCase ( self : List[Any] ) -> Any:
        self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
    def UpperCAmelCase ( self : List[str] ) -> List[str]:
        # Full tokenizer round-trip on ASCII and accented text.
        __UpperCAmelCase : Tuple = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        __UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowercase ) , [289, 50, 14, 174, 386] , )
        __UpperCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
        __UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
        __UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
    @slow
    def UpperCAmelCase ( self : Tuple ) -> Any:
        # fmt: off
        __UpperCAmelCase : str = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class a ( unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): mangled multilingual-tokenizer suite.  The three class
    # attributes were bound to ``a`` — the classmethod below reads
    # ``cls.checkpoint_name`` and a test reads ``self.french_text``, so those
    # were two of the original names.  Methods also read ``self.tokenizer``,
    # ``generated_ids`` and ``encoded``, none of which are ever bound, and the
    # mangled assignments to target-language fields must be restored upstream.
    a : Any = 'valhalla/s2t_mustc_multilinguial_medium'
    a : int = 'C\'est trop cool'
    a : List[Any] = 'Esto es genial'
    @classmethod
    def UpperCAmelCase ( cls : Optional[Any] ) -> str:
        # setUpClass-style hook: loads one shared tokenizer for the class.
        __UpperCAmelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def UpperCAmelCase ( self : str ) -> Tuple:
        # Spot-checks a few language-code ids of the multilingual checkpoint.
        self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
    def UpperCAmelCase ( self : Dict ) -> List[Any]:
        self.assertEqual(self.tokenizer.vocab_size , 10000 )
    def UpperCAmelCase ( self : Optional[Any] ) -> str:
        # Decoding with/without special tokens should agree once the lang id is stripped.
        self.assertIn(__lowercase , self.tokenizer.all_special_ids )
        __UpperCAmelCase : Dict = [ES_CODE, 4, 1601, 47, 7647, 2]
        __UpperCAmelCase : List[Any] = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
        __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertNotIn(self.tokenizer.eos_token , __lowercase )
    def UpperCAmelCase ( self : List[str] ) -> str:
        # Encoded French text should start with the target-lang id and end with EOS.
        __UpperCAmelCase : str = """fr"""
        __UpperCAmelCase : Any = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , __lowercase )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Switching the target language should switch the prefix tokens.
        __UpperCAmelCase : int = """fr"""
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        __UpperCAmelCase : Optional[int] = """es"""
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): mangled GPTSan-japanese tokenizer suite (truncated below).
    # The base ``lowercase__`` is undefined (originally the TokenizerTesterMixin
    # imported above) and the three class attributes were all bound to ``a``,
    # clobbering each other; the mixin expects names like ``tokenizer_class`` —
    # confirm against the pristine upstream test.
    a : Optional[int] = GPTSanJapaneseTokenizer
    a : Optional[Any] = False
    a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
    def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
        # NOTE(review): mangled get_clean_sequence-style helper — the first line
        # unpacks into throwaway locals while the following lines read the
        # undefined names ``tokenizer``/``text``/``ids``; the encode/decode
        # argument values were destroyed by the mangling and must be restored
        # from the pristine upstream test before this can run.
        __UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        return text, ids
    def UpperCAmelCase ( self : int ) -> Optional[Any]:
        # Deliberately skipped common-tokenizer check (see mixin hooks upstream).
        pass  # TODO add if relevant
    def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        # Deliberately skipped common-tokenizer check (see mixin hooks upstream).
        pass  # TODO add if relevant
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        # Deliberately skipped common-tokenizer check (see mixin hooks upstream).
        pass  # TODO add if relevant
    def UpperCAmelCase ( self : str ) -> Tuple:
        # NOTE(review): full-tokenizer test with mangled locals — every binding
        # goes to a throwaway name while later lines read ``tokenizer`` and
        # ``tokens``, which are never defined; the assertListEqual argument
        # order was also destroyed.  Restore from the pristine upstream test.
        __UpperCAmelCase : List[str] = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
        __UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids without special tokens
        __UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids with special tokens
        __UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
        __UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    def UpperCAmelCase ( self : Tuple ) -> Dict:
        # NOTE(review): checks that <|bagoftoken|> expands to repeated readings
        # on decode.  Locals are mangled: later lines read ``tokenizer`` and the
        # encode/decode results, none of which are bound — restore upstream.
        __UpperCAmelCase : int = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        __UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
        __UpperCAmelCase : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
    @slow
    def UpperCAmelCase ( self : int ) -> Optional[int]:
        # NOTE(review): prefix-text equivalence test against the hub checkpoint.
        # Mangled locals: the three encodes/decodes are bound to throwaway names
        # while the body also reads ``prefix_text`` and ``input_text`` (never
        # bound) — restore names and argument wiring from the upstream test.
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __UpperCAmelCase : List[Any] = """こんにちは、世界。"""
        __UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
        __UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
        __UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
        __UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        __UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
        __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Any = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        # Deliberate no-op: overrides a common test that does not apply to this tokenizer.
        pass
    def UpperCAmelCase ( self : Any ) -> int:
        # tokenizer has no padding token
        # Deliberate no-op: the common padding tests are skipped for this tokenizer.
        pass
| 63 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
a : List[Any] = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    """A node of the disjoint-set forest: payload, parent pointer, and rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        # Each node starts as the root of its own singleton set.
        self.parent = self
        # Rank is an upper bound on the subtree height, used for union by rank.
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) forest with path compression and union by rank."""

    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank root
        # under the higher-rank one; bump rank only on ties
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal's minimum-spanning-tree algorithm."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight (stored in both directions)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return the minimum spanning tree of this graph as a new graph."""
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # Kruskal: consider edges in increasing weight order.
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: take an edge iff its endpoints are in different components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( BenchmarkArguments ):
    """
    PyTorch-specific benchmark arguments.

    Extends the framework-agnostic BenchmarkArguments with torchscript tracing,
    TPU metric printing, and fp16 opt-level options, and sets up the torch device.
    """

    # Deprecated --no_* flags that are translated to their positive counterparts.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        Translate deprecated --no_X flags into positive X flags, pop the
        PyTorch-only kwargs, then defer the rest to BenchmarkArguments.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}"""
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Pick the torch device (CPU / TPU / CUDA) and count usable GPUs once."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
from collections import namedtuple

# Per-unit factors relative to cubic meters: `from_` converts the unit into
# cubic meters, `to` converts cubic meters into the unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def lowerCamelCase__ (value: float, from_type: str, to_type: str) -> float:
    """Convert `value` from one volume unit to another via cubic meters.

    Raises ValueError (listing the supported unit names) for unknown units.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + """, """.join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + """, """.join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared sanity checks for a dataset loaded from the SQL fixture table."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # Fixture names inferred from usage — confirm against the test conftest.
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # Reading with an explicit Features spec (or None for the default schema).
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield the rows of the "dataset" table from the SQLite file at `sqlite_path`."""
    # NOTE(review): the module-level `import sqlitea` looks like a mangled
    # `import sqlite3`; importing locally keeps this helper working regardless.
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # Round-trip: read the fixture, write it back (single process), compare rows.
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # Same round-trip as above but writing with two worker processes.
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # num_proc=0 is invalid and must raise — presumably ValueError; confirm with writer impl.
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that remain on an n x n board."""
    y, x = position
    # All eight L-shaped knight moves.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """True when every cell of the board has been visited (no zeros remain)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from `pos`, move number `curr`."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack: undo the move
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting cell.

    Returns the board filled with move numbers 1..n*n, or raises ValueError
    when no tour exists for this board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1  # place the knight on the starting square
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0  # this start failed; reset it
    msg = f"""Open Kight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 63 |
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]; seive[k] is True when k is prime.
# NOTE(review): indices 0 and 1 are never cleared, so they read as "prime" —
# the lookups below only use it for n >= 2.
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        # i is prime: mark every multiple starting at i*i as composite.
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Look up primality of n (0 <= n <= 1_000_000) in the precomputed sieve."""
    return seive[n]
def contains_an_even_digit(number: int) -> bool:
    """True if the decimal representation of `number` contains an even digit."""
    return any(digit in """02468""" for digit in str(number))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below `limit` (every digit rotation is prime).

    Candidates with an even digit are skipped up front: any rotation would end
    in that digit and therefore be composite.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    """Project Euler 35: count the circular primes below one million."""
    return len(find_circular_primes())
if __name__ == "__main__":
    # Print the count of circular primes below one million.
    print(f"""{len(find_circular_primes()) = }""")
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison operators accepted in pip-style version requirements,
# keyed by their textual form (used by _compare_versions / require_version).
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Check `got_ver` against `want_ver` with comparator `op`; raise on failure."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}."""
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"""
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Verify that a pip-style requirement (e.g. "tokenizers>=0.10,<0.11") is met.

    Raises ValueError for malformed requirements, PackageNotFoundError when the
    package is missing, and ImportError when an installed version fails a bound.
    """
    hint = f"""\n{hint}""" if hint is not None else """"""
    # non-versioned check
    if re.match(r"""^[\w_\-\d]+$""", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""", requirement)
        if not match:
            raise ValueError(
                """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
                f""" got {requirement}"""
            )
        pkg, want_full = match[0]
        want_range = want_full.split(""",""")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"""^([\s!=<>]{1,2})(.+)""", w)
            if not match:
                raise ValueError(
                    """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
                    f""" but got {requirement}"""
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""")

    # special case: compare against the running interpreter version
    if pkg == "python":
        got_ver = """.""".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}"""
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper that appends a core-development install hint."""
    hint = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
    return require_version(requirement, hint)
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
# (parlai substring, hf substring) pairs applied in order by rename_state_dict_key.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key to the corresponding HF Blenderbot key."""
    if k == "embeddings.weight":
        return "shared.weight"
    # Apply the global substring renames first.
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    # Encoder and decoder use different layer-norm naming.
    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd):
    """In-place: rename the four layernorm_embedding entries of `sd` to layer_norm."""
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into a HF model and save it."""
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # NOTE(review): the mangled original obscured which dict is renamed /
        # loaded here; renaming `sd` and loading `mapping` matches the key
        # namespaces — confirm against the upstream conversion script.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a ParlAI Blenderbot checkpoint into HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a ( BaseOutput ):
    """Output of the Flax conditional 2D UNet: the predicted sample tensor."""

    # Predicted / denoised sample; presumably (batch, channels, height, width) — TODO confirm
    sample: jnp.ndarray
@flax_register_to_config
class a ( nn.Module , FlaxModelMixin , ConfigMixin ):
    """Flax conditional 2D UNet model.

    Field names restored to match the `self.<name>` references in the methods
    below (e.g. self.sample_size, self.in_channels, self.cross_attention_dim).
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
def UpperCAmelCase ( self : int , __lowercase : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
__UpperCAmelCase : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
__UpperCAmelCase : Optional[int] = jnp.zeros(__lowercase , dtype=jnp.floataa )
__UpperCAmelCase : Optional[int] = jnp.ones((1,) , dtype=jnp.intaa )
__UpperCAmelCase : Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__UpperCAmelCase , __UpperCAmelCase : Tuple = jax.random.split(__lowercase )
__UpperCAmelCase : int = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__lowercase , __lowercase , __lowercase , __lowercase )["params"]
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : str = self.block_out_channels
__UpperCAmelCase : Union[str, Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__UpperCAmelCase : Tuple = self.num_attention_heads or self.attention_head_dim
# input
__UpperCAmelCase : str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__UpperCAmelCase : Dict = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__UpperCAmelCase : Optional[int] = FlaxTimestepEmbedding(__lowercase , dtype=self.dtype )
__UpperCAmelCase : int = self.only_cross_attention
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : List[str] = (num_attention_heads,) * len(self.down_block_types )
# down
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : int = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__UpperCAmelCase : Dict = output_channel
__UpperCAmelCase : Optional[Any] = block_out_channels[i]
__UpperCAmelCase : Union[str, Any] = i == len(__lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__UpperCAmelCase : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=__lowercase , out_channels=__lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__UpperCAmelCase : List[Any] = FlaxDownBlockaD(
in_channels=__lowercase , out_channels=__lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowercase )
__UpperCAmelCase : Union[str, Any] = down_blocks
# mid
__UpperCAmelCase : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Tuple = list(reversed(__lowercase ) )
__UpperCAmelCase : Any = list(reversed(__lowercase ) )
__UpperCAmelCase : Tuple = list(reversed(__lowercase ) )
__UpperCAmelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__UpperCAmelCase : Dict = output_channel
__UpperCAmelCase : Any = reversed_block_out_channels[i]
__UpperCAmelCase : Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(__lowercase ) - 1 )]
__UpperCAmelCase : Union[str, Any] = i == len(__lowercase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__UpperCAmelCase : Optional[Any] = FlaxCrossAttnUpBlockaD(
in_channels=__lowercase , out_channels=__lowercase , prev_output_channel=__lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__UpperCAmelCase : Any = FlaxUpBlockaD(
in_channels=__lowercase , out_channels=__lowercase , prev_output_channel=__lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__lowercase )
__UpperCAmelCase : List[str] = output_channel
__UpperCAmelCase : Any = up_blocks
# out
__UpperCAmelCase : str = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[int] , __lowercase : List[str] , __lowercase : int , __lowercase : Dict , __lowercase : int=None , __lowercase : Optional[int]=None , __lowercase : bool = True , __lowercase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(__lowercase , jnp.ndarray ):
__UpperCAmelCase : Tuple = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : int = timesteps.astype(dtype=jnp.floataa )
__UpperCAmelCase : List[Any] = jnp.expand_dims(__lowercase , 0 )
__UpperCAmelCase : List[str] = self.time_proj(__lowercase )
__UpperCAmelCase : Tuple = self.time_embedding(__lowercase )
# 2. pre-process
__UpperCAmelCase : Any = jnp.transpose(__lowercase , (0, 2, 3, 1) )
__UpperCAmelCase : Optional[int] = self.conv_in(__lowercase )
# 3. down
__UpperCAmelCase : str = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = down_block(__lowercase , __lowercase , __lowercase , deterministic=not train )
else:
__UpperCAmelCase , __UpperCAmelCase : int = down_block(__lowercase , __lowercase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__UpperCAmelCase : str = ()
for down_block_res_sample, down_block_additional_residual in zip(
__lowercase , __lowercase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__UpperCAmelCase : Tuple = new_down_block_res_samples
# 4. mid
__UpperCAmelCase : List[str] = self.mid_block(__lowercase , __lowercase , __lowercase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__UpperCAmelCase : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
__UpperCAmelCase : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : List[Any] = up_block(
__lowercase , temb=__lowercase , encoder_hidden_states=__lowercase , res_hidden_states_tuple=__lowercase , deterministic=not train , )
else:
__UpperCAmelCase : Union[str, Any] = up_block(__lowercase , temb=__lowercase , res_hidden_states_tuple=__lowercase , deterministic=not train )
# 6. post-process
__UpperCAmelCase : Any = self.conv_norm_out(__lowercase )
__UpperCAmelCase : Tuple = nn.silu(__lowercase )
__UpperCAmelCase : List[str] = self.conv_out(__lowercase )
__UpperCAmelCase : str = jnp.transpose(__lowercase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__lowercase )
| 63 |
def lowerCamelCase__ ( sorted_collection , item ):
    """Interpolation search over an ascending-sorted numeric sequence.

    Returns the index of ``item`` or ``None`` when absent. The original
    signature declared two parameters both named ``__lowerCamelCase``
    (a SyntaxError) and collapsed ``left``/``right`` into a single local,
    so the bounds were never updated; both defects are fixed here.
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                # probe fell below the window: shift the window down
                right = left
                left = point
            elif point > right:
                # probe fell above the window: shift the window up
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def lowerCamelCase__ ( sorted_collection , item , left , right ):
    """Recursive interpolation search on ``sorted_collection[left:right+1]``.

    Returns the index of ``item`` or ``None``. The original version had
    duplicate ``__lowerCamelCase`` parameters (a SyntaxError) and recursed
    into ``interpolation_search_by_recursion``, a name that does not exist
    in this module; the recursion now calls this function itself.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        # probe fell below the window: search [point, left]
        return lowerCamelCase__(sorted_collection , item , point , left )
    elif point > right:
        # probe fell above the window: search [right, point] from the right bound
        return lowerCamelCase__(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return lowerCamelCase__(
                sorted_collection , item , left , point - 1 )
        else:
            return lowerCamelCase__(
                sorted_collection , item , point + 1 , right )
def lowerCamelCase__ ( __lowerCamelCase : list ):
    """Raise ``ValueError`` unless the collection is ascending-sorted.

    Returns ``True`` when sorted. The original body compared an undefined
    name ``collection`` instead of the parameter (a NameError), and the
    ``int`` annotation was wrong for a value passed to ``sorted``.
    """
    if __lowerCamelCase != sorted(__lowerCamelCase ):
        raise ValueError("""Collection must be ascending sorted""" )
    return True
if __name__ == "__main__":
    import sys
    # NOTE(review): this script block is broken as written. Every value below
    # is assigned to the same name `a`, yet the code reads `debug`,
    # `collection`, `target` and `result`, and it calls `__assert_sorted` and
    # `interpolation_search`, none of which are defined in this module (the
    # functions above are all named `lowerCamelCase__`). Fixing it requires
    # restoring the intended names file-wide — flagged rather than changed.
    a : Optional[Any] = 0
    if debug == 1:
        a : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    a : Tuple = 67
    a : List[Any] = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
| 63 | 1 |
from typing import Any
def lowerCamelCase__ ( observations_space : list , states_space : list , initial_probabilities : dict , transition_probabilities : dict , emission_probabilities : dict , ):
    """Viterbi algorithm: most likely hidden-state sequence for the observations.

    The original declared five parameters all named ``__lowerCamelCase``
    (a SyntaxError), collapsed the ``probabilities``/``pointers`` tables into
    one local, and called an undefined ``_validation`` helper; the emptiness
    check is performed inline here instead.

    Raises ValueError when any argument is empty.
    Returns the list of states, one per observation.
    """
    # Inline replacement for the undefined `_validation` call: reject empty input.
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError("""There's an empty parameter""" )

    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = """"""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]

    # argmax for given final observation
    arg_max = """"""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def lowerCamelCase__ ( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ):
    """Validate all Viterbi inputs (emptiness, list shapes, dict shapes).

    The original declared five parameters all named ``__lowerCamelCase``,
    which is a SyntaxError; distinct names are restored here.
    NOTE(review): the helper names below are not defined in this module
    (the helpers were renamed to ``lowerCamelCase__``) — kept as in the
    original source.
    """
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def lowerCamelCase__ ( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ):
    """Raise ``ValueError`` when any of the five Viterbi inputs is empty.

    The original declared five parameters all named ``__lowerCamelCase``
    (a SyntaxError); distinct names are restored.
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError("""There's an empty parameter""" )
def lowerCamelCase__ ( observations_space : Any , states_space : Any ):
    """Validate that both spaces are lists of strings.

    Duplicate ``__lowerCamelCase`` parameters (a SyntaxError) are replaced
    with distinct names. NOTE(review): ``_validate_list`` is not defined
    under that name in this module — kept as in the original source.
    """
    _validate_list(observations_space , """observations_space""" )
    _validate_list(states_space , """states_space""" )
def lowerCamelCase__ ( _object : Any , var_name : str ):
    """Raise ``ValueError`` unless ``_object`` is a list of strings.

    The original signature named both parameters ``__lowerCamelCase``
    (a SyntaxError) while the body read ``_object``; the intended parameter
    names are restored and the mangled ``isinstance`` targets fixed.
    """
    if not isinstance(_object , list ):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg )
def lowerCamelCase__ ( initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ):
    """Validate the three probability tables of the Viterbi inputs.

    Duplicate ``__lowerCamelCase`` parameters (a SyntaxError) are replaced
    with distinct names; the mangled third argument of the first call is
    restored to ``float`` (initial probabilities map state -> float).
    NOTE(review): the helper names are not defined under these names in
    this module — kept as in the original source.
    """
    _validate_dict(initial_probabilities , """initial_probabilities""" , float )
    _validate_nested_dict(transition_probabilities , """transition_probabilities""" )
    _validate_nested_dict(emission_probabilities , """emission_probabilities""" )
def lowerCamelCase__ ( _object : Any , var_name : str ):
    """Validate a dict-of-dicts whose inner values are floats.

    The original signature named both parameters ``__lowerCamelCase``
    (a SyntaxError) while the body read ``_object``; the intended names and
    the mangled ``dict``/``float`` arguments are restored.
    NOTE(review): ``_validate_dict`` is not defined under that name in this
    module — kept as in the original source.
    """
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def lowerCamelCase__ ( _object : Any , var_name : str , value_type : type , nested : bool = False ):
    """Raise ``ValueError`` unless ``_object`` is a dict with string keys
    and values of ``value_type``.

    The original signature reused ``__lowerCamelCase`` for several
    parameters (a SyntaxError) while the body read ``_object``; the
    intended names are restored.
    """
    if not isinstance(_object , dict ):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = """nested dictionary """ if nested else """"""
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg )
if __name__ == "__main__":
    # Run this module's embedded doctests when invoked directly.
    import doctest

    doctest.testmod()
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger for this configuration module.
a : Tuple = logging.get_logger(__name__)

# Map from model identifier to the hosted config.json used by from_pretrained.
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
    """Configuration class for the OpenAI GPT model.

    Stores the hyper-parameters that define the model architecture and the
    sequence-summary head. The original version declared two class
    attributes both named ``a`` (the second silently clobbered the first)
    and gave every ``__init__`` parameter the same name ``__lowercase``
    (a SyntaxError) while the body read the real hyper-parameter names;
    both are restored here per the reference implementation.
    """

    # Identifier used by the auto-config machinery.
    model_type = 'openai-gpt'
    # Maps the generic config attribute names onto this model's short names.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary head options (used by double-heads models).
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.

    See https://arxiv.org/pdf/2202.09778.pdf, formulas (9), (12), (13) and
    Algorithm 2. The original version named all four methods
    ``UpperCAmelCase`` (so later defs shadowed earlier ones) even though the
    class itself calls ``self.set_timesteps`` and ``self._get_prev_sample``;
    it also collapsed every ``self.*`` assignment into a throwaway local and
    referenced the non-existent ``torch.atana`` (mangled ``torch.atan2``).
    All names are restored per the reference implementation.
    """

    # Solver order exposed to pipelines (named ``order`` in the reference
    # implementation).
    order : int = 1

    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , trained_betas: Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> None:
        """Precompute betas/alphas/timesteps for the given number of steps."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        # reset the running multistep history
        self.ets = []

    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , return_dict: bool = True , ) -> Union[SchedulerOutput, Tuple]:
        """One reverse-diffusion step using the linear multistep update.

        Raises ValueError when ``set_timesteps`` has not been called.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # Adams–Bashforth-style coefficients, depending on history length.
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self , sample: torch.FloatTensor , *args , **kwargs ) -> torch.FloatTensor:
        """This scheduler does not scale the model input; identity."""
        return sample

    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        """Compute the previous sample from the current sample and ets term."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        # guard against division by ~0 at the final alpha
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self ) -> int:
        return self.config.num_train_timesteps
| 63 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
    """YAML SafeLoader that rejects mappings containing duplicate keys.

    The original version named both methods ``UpperCAmelCase`` (so the second
    shadowed the first and neither overrode ``construct_mapping``) while
    calling ``self._check_no_duplicates_on_constructed_node`` — the intended
    method names are restored here.
    """

    def _check_no_duplicates_on_constructed_node( self , node ):
        """Raise TypeError if the mapping node contains a repeated key."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert to tuples before counting.
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )

    def construct_mapping( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Split a README into its leading ``---``-delimited YAML block and body.

    Returns ``(yaml_block, rest)``; ``yaml_block`` is ``None`` when the
    content does not start with a YAML front-matter block. The original
    collapsed all locals into one name and then read the undefined names
    ``full_content``/``sep_idx``/``yamlblock`` — restored here.
    """
    full_content = list(__lowerCamelCase.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class a ( lowercase__ ):
    """Dataset README metadata: load/save the YAML front-matter block.

    The original version named every method ``UpperCAmelCase`` (later defs
    shadowed earlier ones) while the bodies called ``cls.from_yaml_string``,
    ``self._to_readme``, ``self.to_yaml_string`` and read
    ``cls._FIELDS_WITH_DASHES`` — the intended names are restored here.
    """

    # Fields stored with dashes in YAML but underscores as Python attributes.
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme( cls , path: Path ) -> "DatasetMetadata":
        """Build an instance from the YAML block of the README at ``path``."""
        with open(path , encoding="""utf-8""" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()

    def to_readme( self , path: Path ) -> None:
        """Write this metadata back into the README at ``path``, keeping its body."""
        if path.exists():
            with open(path , encoding="""utf-8""" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(full_content )

    def _to_readme( self , readme_content: Optional[str] = None ) -> str:
        """Render the README text with this metadata as its YAML front matter."""
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content

    @classmethod
    def from_yaml_string( cls , string: str ) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into an instance."""
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )

    def to_yaml_string( self ) -> str:
        """Serialize to YAML, restoring dashes for the dash-spelled fields."""
        # NOTE(review): sort_keys/allow_unicode were mangled in the source;
        # restored per the reference implementation — confirm.
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
# Known Hub task categories mapped to their (currently empty) sub-task lists.
a : Any = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    # Restored local names: the original assigned every value to the same
    # variable `a`, leaving `ap`, `args`, `readme_filepath` and
    # `dataset_metadata` undefined where they are read below.
    # NOTE(review): `DatasetMetadata` is the intended class name; in this
    # module the class is currently named `a` — confirm.
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
    """transformers-cli entry point: build the parser, register every
    sub-command, parse argv and run the selected command.

    The original collapsed ``parser``/``commands_parser``/``args``/``service``
    into throwaway locals and passed the undefined name ``__lowerCamelCase``
    to every ``register_subcommand`` call — restored here.
    """
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    # The CLI entry point above is defined as `lowerCamelCase__`; the
    # original call to `main()` raised NameError because no `main` exists
    # in this module.
    lowerCamelCase__()
| 63 | 1 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class a :
    """Helper that builds a tiny TF DistilBERT config/inputs and checks each
    head's output shapes.

    The original collapsed every ``self.*`` assignment into a throwaway
    local (so no attribute was ever set) and named every method
    ``UpperCAmelCase`` even though the sibling test class calls them by
    their real names — both restored per the reference implementation.
    """

    def __init__( self , parent ):
        # Deliberately tiny hyper-parameters so tests run fast.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs( self ):
        """Build a DistilBertConfig plus random ids, mask and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        # The model must also accept positional list inputs.
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        # Tile each example across the choice dimension.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common + pipeline test suite for TF DistilBERT.

    The original declared four class attributes all named ``a`` (each
    clobbering the previous) and named every method ``UpperCAmelCase``
    (no ``setUp``, no discoverable ``test_*`` methods) — the attribute and
    method names required by the mixins and unittest are restored per the
    reference implementation.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class a(unittest.TestCase):
    """Integration test: run the pretrained TF DistilBERT base model on a
    fixed input and compare a slice of the hidden states to reference values.
    """

    @slow
    def test_inference_no_head(self) -> None:
        """Forward a fixed token sequence and check shape + value slice.

        NOTE(review): the original body referenced the undefined name
        ``__lowercase`` for the input, the expected shape and the expected
        slice (NameError), and the method name was not ``test_*``-prefixed.
        """
        model = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        # [0] selects the last-layer hidden states from the model output.
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 63 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public names it exports.
# NOTE(review): the original bound every piece of this table to the same
# throwaway name `a`, so the `_import_structure` handed to `_LazyModule` at
# the bottom was never defined and importing the package raised NameError.
# Restored the canonical lazy-module layout.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of the lazy structure so static type checkers see the real names.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
a : List[Any] = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s
a : Any = 3e8 # unit of c : m * s^-1
def lowerCamelCase__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ):
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
__UpperCAmelCase : List[Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__UpperCAmelCase : Tuple = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__UpperCAmelCase : Optional[Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazy-import table: submodule name -> public names it exports.
# NOTE(review): the tokenizer entry was bound to a throwaway name `a`, so the
# `_import_structure` passed to `_LazyModule` below was never defined; the
# tokenizer module/class names were also garbled (gpt_swa / GPTSwaTokenizer).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public names it exports.
# NOTE(review): the original bound every piece of this table to the same
# throwaway name `a`, so the `_import_structure` handed to `_LazyModule` at
# the bottom was never defined and importing the package raised NameError.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of the lazy structure so static type checkers see the real names.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
def lowerCamelCase__(num: int) -> list[int]:
    """Return all primes <= ``num`` using the sieve of Eratosthenes.

    Args:
        num: upper bound (inclusive); must be a positive integer.

    Raises:
        ValueError: if ``num`` is not positive.

    NOTE(review): the sieve list, the loop counter and the composite-marking
    assignment were all bound to dead throwaway names, so ``primes``/``p``
    raised NameError and nothing was ever marked composite; restored.
    """
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original printed `prime_sieve_eratosthenes(user_num)`
    # with both names undefined; call the sieve defined above instead.
    user_num = int(input("Enter a positive integer: ").strip())
    print(lowerCamelCase__(user_num))
| 63 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Bundles the three MNIST splits returned by read_data_sets().
# NOTE(review): both values below were bound to a throwaway name `a`, so the
# `_Datasets(...)` constructor calls and the `DEFAULT_SOURCE_URL` default
# further down raised NameError; restored the names the module reads.
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    """Read one big-endian unsigned 32-bit integer from ``bytestream``.

    NOTE(review): renamed from the shadow-prone ``lowerCamelCase__`` to the
    name the extractors below actually call, and fixed the nonexistent
    ``numpy.uintaa`` dtype — the MNIST header fields are big-endian uint32.
    """
    dt = numpy.dtype(numpy.uint32).newbyteorder(""">""")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(__lowerCamelCase , """Please use tf.data to implement this functionality.""" )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=__lowerCamelCase ) as bytestream:
__UpperCAmelCase : Union[str, Any] = _readaa(__lowerCamelCase )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
__UpperCAmelCase : Any = _readaa(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = _readaa(__lowerCamelCase )
__UpperCAmelCase : List[Any] = _readaa(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytestream.read(rows * cols * num_images )
__UpperCAmelCase : Union[str, Any] = numpy.frombuffer(__lowerCamelCase , dtype=numpy.uinta )
__UpperCAmelCase : Union[str, Any] = data.reshape(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , 1 )
return data
@deprecated(__lowerCamelCase , """Please use tf.one_hot on tensors.""" )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : Any = labels_dense.shape[0]
__UpperCAmelCase : List[str] = numpy.arange(__lowerCamelCase ) * num_classes
__UpperCAmelCase : Union[str, Any] = numpy.zeros((num_labels, num_classes) )
__UpperCAmelCase : List[Any] = 1
return labels_one_hot
@deprecated(__lowerCamelCase , """Please use tf.data to implement this functionality.""" )
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[int]=10 ):
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=__lowerCamelCase ) as bytestream:
__UpperCAmelCase : str = _readaa(__lowerCamelCase )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
__UpperCAmelCase : str = _readaa(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytestream.read(__lowerCamelCase )
__UpperCAmelCase : List[Any] = numpy.frombuffer(__lowerCamelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__lowerCamelCase , __lowerCamelCase )
return labels
class _DataSet:
    """In-memory container for one MNIST split (deprecated upstream API).

    NOTE(review): heavily restored. The original was not even parseable —
    every parameter in ``__init__``/``next_batch`` shared the single name
    ``__lowercase`` (SyntaxError), all methods and properties shared the name
    ``UpperCAmelCase`` (later defs shadow earlier ones while the body reads
    ``self.images``/``self.labels``), and several locals (``perma``,
    ``seeda``) were undefined.  The class is renamed ``_DataSet`` because
    that is the name read_data_sets() below instantiates.
    """

    @deprecated(
        None, """Please use alternatives such as official/mnist/_DataSet.py"""
        """ from tensorflow/models.""", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet.

        ``dtype`` may be uint8 (leave values in [0, 255]) or float32
        (rescale into [0, 1]).  ``seed`` makes shuffling deterministic.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next ``batch_size`` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(__lowerCamelCase , """Please write your own downloading logic.""" )
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int ):
if not gfile.Exists(__lowerCamelCase ):
gfile.MakeDirs(__lowerCamelCase )
__UpperCAmelCase : List[Any] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if not gfile.Exists(__lowerCamelCase ):
urllib.request.urlretrieve(__lowerCamelCase , __lowerCamelCase ) # noqa: S310
with gfile.GFile(__lowerCamelCase ) as f:
__UpperCAmelCase : str = f.size()
print("""Successfully downloaded""" , __lowerCamelCase , __lowerCamelCase , """bytes.""" )
return filepath
@deprecated(
    None, """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    """Download (if needed) and parse the four MNIST files under
    ``train_dir``; return a ``_Datasets(train, validation, test)`` tuple.

    NOTE(review): heavily restored — the original signature repeated the same
    parameter name (SyntaxError) and every intermediate value was assigned to
    a dead local while later lines read the conventional names.
    """
    if fake_data:

        def fake():
            # All-fake split used by tests; carries no real images.
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = """train-images-idx3-ubyte.gz"""
    train_labels_file = """train-labels-idx1-ubyte.gz"""
    test_images_file = """t10k-images-idx3-ubyte.gz"""
    test_labels_file = """t10k-labels-idx1-ubyte.gz"""

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, """rb""") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, """rb""") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, """rb""") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, """rb""") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            """Validation size should be between 0 and """
            f"""{len(train_images)}. Received: {validation_size}."""
        )
        raise ValueError(msg)

    # Carve the validation split off the front of the training data.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both module-level values below were bound to the same
# throwaway name `a`, so the `logger.warning`/`logger.info` calls in the
# config classes raised NameError and the archive map was unreachable;
# restored the conventional names.
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the CLIP-style vision encoder of a GIT model.

    NOTE(review): restored. The original class was named `a` while the text
    config below instantiates ``GitVisionConfig``; its ``__init__`` repeated
    the parameter name ``__lowercase`` (SyntaxError); and the ``model_type``
    class attribute — read by ``from_pretrained`` below — was bound to `a`.
    """

    model_type = 'git_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a full GIT config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""") == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for a full GIT (GenerativeImage2Text) model.

    NOTE(review): restored. The original ``__init__`` repeated the parameter
    name ``__lowercase`` (SyntaxError), ``model_type`` was bound to `a`, and
    the serialization method carried the obfuscated name ``UpperCAmelCase``
    instead of the ``to_dict`` contract callers rely on.
    """

    model_type = 'git'

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-1_2, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, nesting the vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 1 |
from __future__ import annotations
from typing import TypedDict
class a ( lowercase__ ):
"""simple docstring"""
a : str
a : int
def lowerCamelCase__ ( __lowerCamelCase : str ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__lowerCamelCase ) )]
def lowerCamelCase__ ( __lowerCamelCase : str ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
__UpperCAmelCase : List[Any] = all_rotations(__lowerCamelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
__UpperCAmelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowerCamelCase ),
}
return response
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : int ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
__UpperCAmelCase : Tuple = int(__lowerCamelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__lowerCamelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
__UpperCAmelCase : Tuple = [""""""] * len(__lowerCamelCase )
for _ in range(len(__lowerCamelCase ) ):
for i in range(len(__lowerCamelCase ) ):
__UpperCAmelCase : Optional[Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
a : int = "Provide a string that I will generate its BWT transform: "
a : str = input(entry_msg).strip()
a : List[Any] = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
a : Union[str, Any] = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BarthezTokenizer / BarthezTokenizerFast.

    NOTE(review): restored. The original clobbered every mixin configuration
    attribute into the single name `a`, defined every method under the name
    ``UpperCAmelCase`` (so only the last one existed and unittest never ran
    any of them), and method bodies referenced the undefined ``__lowercase``.
    """

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self) -> None:
        """`<pad>` must round-trip to id 1 and back."""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<s>""")
        self.assertEqual(vocab_keys[1], """<pad>""")
        self.assertEqual(vocab_keys[-1], """<mask>""")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self) -> None:
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="""pt""")
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
        expected_encoding = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="""moussaKam/mbarthez""", revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""", sequences=sequences, )
| 63 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer module.
a : Dict = logging.get_logger(__name__)
# NOTE(review): every constant below re-binds the single name ``a`` while the
# class further down reads ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``
# and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` — names that are never defined
# in this copy; verify against the original module.
# File names expected for a byte-level BPE vocabulary (token map + merge list).
a : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
# Hub download URLs of the vocab/merges files for published BART checkpoints.
a : Optional[int] = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}
# Maximum model input length (in tokens) per checkpoint.
a : int = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}
@lru_cache()
def lowerCamelCase__ ( ):
    """Return a mapping from each of the 256 byte values to a printable
    unicode character (GPT-2 style byte-level BPE alphabet).

    Printable ASCII/Latin-1 bytes map to themselves; the remaining bytes are
    mapped, in order, to code points starting at 256 so that no byte is ever
    "unknown" and none maps to whitespace/control characters.

    Returns:
        dict[int, str]: byte value -> single unicode character.

    Fix: the original bound every intermediate to a throwaway local while
    later statements read ``bs``/``cs``/``n`` (NameError at call time).
    """
    # Bytes whose Latin-1 rendering is already printable keep their identity.
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    # Every remaining byte gets the next free code point above 255.
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )


# Restore the conventional name used by call sites elsewhere in this file
# (e.g. ``bytes_to_unicode()`` inside the tokenizer ``__init__``).
bytes_to_unicode = lowerCamelCase__
def lowerCamelCase__ ( __lowerCamelCase ):
    """Return the set of adjacent symbol pairs occurring in ``__lowerCamelCase``.

    Args:
        __lowerCamelCase: a word as a sequence of symbols (e.g. a tuple of
            variable-length strings produced during BPE merging).

    Returns:
        set[tuple]: all ``(previous_symbol, symbol)`` bigrams; empty for
        words shorter than two symbols.

    Fix: the original assigned to throwaway locals while reading the unbound
    names ``pairs``/``word``/``prev_char`` (NameError at call time); it also
    crashed on an empty word.
    """
    word = __lowerCamelCase
    pairs = set()
    if not word:  # robustness: empty word has no bigrams
        return pairs
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs


# Restore the conventional name used by call sites in the tokenizer below
# (``get_pairs`` inside ``bpe``).
get_pairs = lowerCamelCase__
class a ( lowercase__ ):
    """Byte-level Byte-Pair-Encoding (BPE) tokenizer of the GPT-2/BART family.

    Built from a ``vocab.json`` token->id map and a ``merges.txt`` ranked
    merge list; raw text is first mapped byte-by-byte to printable unicode so
    that no input byte is ever out-of-vocabulary.

    NOTE(review): this transformed copy shows two systematic artefacts —
    (1) every public method is named ``UpperCAmelCase`` (later definitions
    shadow earlier ones in the class namespace), and (2) many statements bind
    their result to the throwaway name ``__UpperCAmelCase`` while later lines
    read the intended attribute/local names (``self.encoder``, ``pairs``,
    ``word``, ``index`` …). Verify against the original module before use.
    """

    # Class-level configuration; upstream these are ``vocab_files_names``,
    # ``pretrained_vocab_files_map``, ``max_model_input_sizes`` and
    # ``model_input_names`` — all four are re-bound to the same name ``a``.
    a : Any = VOCAB_FILES_NAMES
    a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a : List[str] = ['input_ids', 'attention_mask']

    # NOTE(review): all parameters share the name ``__lowercase`` (duplicate
    # argument names are a SyntaxError in Python); upstream these are
    # vocab_file, merges_file, errors, bos/eos/sep/cls/unk/pad/mask tokens
    # and add_prefix_space.
    def __init__( self : List[str] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Union[str, Any]="replace" , __lowercase : List[Any]="<s>" , __lowercase : List[str]="</s>" , __lowercase : int="</s>" , __lowercase : str="<s>" , __lowercase : Union[str, Any]="<unk>" , __lowercase : Tuple="<pad>" , __lowercase : Tuple="<mask>" , __lowercase : Optional[int]=False , **__lowercase : int , ) -> Any:
        # Wrap plain-string special tokens in AddedToken objects.
        __UpperCAmelCase : List[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
        __UpperCAmelCase : Optional[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
        __UpperCAmelCase : Union[str, Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
        __UpperCAmelCase : List[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
        __UpperCAmelCase : Dict = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
        __UpperCAmelCase : List[str] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCAmelCase : int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
        # Load the token -> id vocabulary and its inverse.
        with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
            __UpperCAmelCase : str = json.load(__lowercase )
        __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
        __UpperCAmelCase : Tuple = errors  # how to handle errors in decoding
        # Byte <-> printable-unicode tables for byte-level BPE.
        __UpperCAmelCase : Optional[int] = bytes_to_unicode()
        __UpperCAmelCase : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
        # Parse merges.txt (first line is a version header) into rank order.
        with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
            __UpperCAmelCase : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1]
        __UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
        __UpperCAmelCase : List[str] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        # Per-token BPE cache.
        __UpperCAmelCase : Optional[Any] = {}
        __UpperCAmelCase : Optional[Any] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __UpperCAmelCase : str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    # Vocabulary size (upstream: ``vocab_size`` property).
    @property
    def UpperCAmelCase ( self : Any ) -> List[str]:
        return len(self.encoder )

    # Full vocabulary including added tokens (upstream: ``get_vocab``).
    def UpperCAmelCase ( self : Dict ) -> int:
        return dict(self.encoder , **self.added_tokens_encoder )

    # Apply BPE merges to one pre-token, with memoisation (upstream: ``bpe``).
    def UpperCAmelCase ( self : Any , __lowercase : Union[str, Any] ) -> Optional[int]:
        if token in self.cache:
            return self.cache[token]
        __UpperCAmelCase : Tuple = tuple(__lowercase )
        __UpperCAmelCase : Optional[Any] = get_pairs(__lowercase )
        if not pairs:
            return token
        # Repeatedly merge the lowest-ranked bigram until no ranked pair remains.
        while True:
            __UpperCAmelCase : Tuple = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __UpperCAmelCase , __UpperCAmelCase : List[str] = bigram
            __UpperCAmelCase : List[str] = []
            __UpperCAmelCase : Optional[int] = 0
            while i < len(__lowercase ):
                try:
                    __UpperCAmelCase : List[str] = word.index(__lowercase , __lowercase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __UpperCAmelCase : Tuple = j
                if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __UpperCAmelCase : Dict = tuple(__lowercase )
            __UpperCAmelCase : Union[str, Any] = new_word
            if len(__lowercase ) == 1:
                break
            else:
                __UpperCAmelCase : int = get_pairs(__lowercase )
        __UpperCAmelCase : Union[str, Any] = """ """.join(__lowercase )
        __UpperCAmelCase : int = word
        return word

    # Split text with the regex, byte-encode each piece, then BPE it
    # (upstream: ``_tokenize``).
    def UpperCAmelCase ( self : Optional[Any] , __lowercase : int ) -> List[str]:
        __UpperCAmelCase : List[Any] = []
        for token in re.findall(self.pat , __lowercase ):
            __UpperCAmelCase : Dict = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(""" """ ) )
        return bpe_tokens

    # Token string -> id, falling back to the unknown token.
    def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] ) -> str:
        return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )

    # Id -> token string.
    def UpperCAmelCase ( self : Any , __lowercase : Any ) -> Optional[Any]:
        return self.decoder.get(__lowercase )

    # Token strings -> readable text via the byte decoder.
    def UpperCAmelCase ( self : str , __lowercase : Any ) -> List[str]:
        __UpperCAmelCase : List[Any] = """""".join(__lowercase )
        __UpperCAmelCase : str = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text

    # Write vocab.json and merges.txt to ``save_directory``
    # (upstream: ``save_vocabulary``).
    def UpperCAmelCase ( self : Union[str, Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __UpperCAmelCase : Union[str, Any] = os.path.join(
            __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCAmelCase : Optional[int] = os.path.join(
            __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(__lowercase , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + """\n""" )
        __UpperCAmelCase : Tuple = 0
        with open(__lowercase , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            # NOTE(review): the sort key lambda takes ``__lowercase`` but reads
            # ``kv`` — upstream this is ``lambda kv: kv[1]``; verify.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    __UpperCAmelCase : Union[str, Any] = token_index
                writer.write(""" """.join(__lowercase ) + """\n""" )
                index += 1
        return vocab_file, merge_file

    # Add BOS/EOS around one or two sequences
    # (upstream: ``build_inputs_with_special_tokens``).
    def UpperCAmelCase ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __UpperCAmelCase : List[str] = [self.cls_token_id]
        __UpperCAmelCase : Optional[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # 1 for special tokens, 0 for sequence tokens
    # (upstream: ``get_special_tokens_mask``).
    def UpperCAmelCase ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowercase )) + [1]
        return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]

    # BART does not use token types: all zeros
    # (upstream: ``create_token_type_ids_from_sequences``).
    def UpperCAmelCase ( self : int , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
        __UpperCAmelCase : Optional[int] = [self.sep_token_id]
        __UpperCAmelCase : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    # Optionally prepend a space before tokenization
    # (upstream: ``prepare_for_tokenization``).
    def UpperCAmelCase ( self : List[str] , __lowercase : Optional[Any] , __lowercase : Any=False , **__lowercase : List[str] ) -> Union[str, Any]:
        __UpperCAmelCase : Optional[int] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
            __UpperCAmelCase : List[Any] = """ """ + text
        return (text, kwargs)
| 63 |
from __future__ import annotations
import math
def lowerCamelCase__ ( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ):
    """Minimax over a perfect binary tree whose leaves are ``scores``.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the current level is a maximising level.
        scores: leaf values; must be non-empty (ideally a power of two long).
        height: depth of the leaf level, i.e. ``log2(len(scores))``.

    Returns:
        The optimal leaf value reachable with both players playing optimally.

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.

    Fix: the original declared five parameters with the identical name
    (a SyntaxError), read unbound locals, recursed through the undefined
    name ``minimax`` and passed ``is_max`` through unchanged; levels now
    alternate between maximising and minimising as in standard minimax.
    """
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    # Leaves sit at ``height``; return the score directly.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            lowerCamelCase__(depth + 1 , node_index * 2 , False , scores , height ) ,
            lowerCamelCase__(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        lowerCamelCase__(depth + 1 , node_index * 2 , True , scores , height ) ,
        lowerCamelCase__(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


# Restore the conventional name used by ``main`` below.
minimax = lowerCamelCase__
def lowerCamelCase__ ( ):
    """Demonstrate ``minimax`` on a fixed set of leaf scores.

    >>> main()
    Optimal value : 65

    Fix: the original passed three unbound ``__lowerCamelCase`` arguments to
    an undefined ``minimax`` name; the root is a maximising level (True),
    matching the doctest result above.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(minimax(0 , 0 , True , scores , height ) )


# Restore the conventional name called by the guard below.
main = lowerCamelCase__

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 | 1 |
class a :
    """A binary-search-tree node used by ``tree_sort`` below.

    Fix: the original ``__init__`` bound ``val``/children to throwaway locals
    (instances ended up with no attributes), ``insert`` assigned new children
    to locals instead of ``self.left``/``self.right``, and the typing
    annotations referenced names never imported in this module.
    """

    def __init__( self , val ):
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val ):
        """Insert ``val`` into the subtree rooted at this node (BST order);
        duplicates are ignored. An empty (falsy-valued) node adopts ``val``."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = a(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = a(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val

    # Backward-compatible alias for the transformed method name.
    UpperCAmelCase = insert


# Call sites in this file construct nodes via ``Node(...)``.
Node = a
def lowerCamelCase__ ( root , res ):
    """Append the values of the subtree rooted at ``root`` to ``res`` in
    sorted (inorder) order; ``res`` is mutated in place.

    Fix: the original declared both parameters with the identical name
    (a SyntaxError), read the unbound local ``res`` and recursed through the
    undefined name ``inorder``.
    """
    # Recursive traversal: left subtree, node, right subtree.
    if root:
        lowerCamelCase__(root.left , res )
        res.append(root.val )
        lowerCamelCase__(root.right , res )


# Restore the conventional name used by ``tree_sort`` below.
inorder = lowerCamelCase__
def lowerCamelCase__ ( arr ):
    """Sort ``arr`` using tree sort: build a BST from its elements, then read
    the values back with an inorder traversal.

    Returns the (empty) input unchanged when ``arr`` is empty, otherwise a new
    sorted list. Duplicates are dropped by the BST insert.

    Fix: the original bound every intermediate to throwaway locals while
    reading the unbound names ``root``/``res``.
    """
    # Build BST
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res


# Restore the conventional name called by the guard below.
tree_sort = lowerCamelCase__

if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
a : Tuple = logging.get_logger(__name__)
# Config download URL for the published checkpoint. NOTE(review): both
# constants re-bind the name ``a``; upstream these are ``logger`` and
# ``OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP`` — verify.
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
    """Configuration class for an OpenAI GPT ("openai-gpt") model; constructor
    arguments mirror ``transformers.OpenAIGPTConfig``.

    NOTE(review): the base-class name ``lowercase__`` is not defined in this
    module — upstream this inherits from ``PretrainedConfig`` (imported
    above); verify before use.

    Fix: the original ``__init__`` declared every parameter with the identical
    name ``__lowercase`` (a SyntaxError) and bound each argument to a
    throwaway local, so no hyper-parameter ever reached the instance; the
    class attributes also carried typing annotations referencing names never
    imported here.
    """

    # NOTE(review): both class attributes are bound to the same name ``a``
    # (the second shadows the first); upstream these are ``model_type`` and
    # ``attribute_map``.
    a = 'openai-gpt'
    a = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        """Store the model hyper-parameters; unknown keyword arguments are
        forwarded to the base configuration class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 63 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
a : Union[str, Any] = logging.get_logger(__name__)
# Config download URL for the published checkpoint. NOTE(review): both
# constants re-bind the name ``a``; upstream these are ``logger`` and
# ``TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`` — verify.
a : List[Any] = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class a ( lowercase__ ):
    """Configuration class for a TimeSformer video model; constructor
    arguments mirror ``transformers.TimesformerConfig``.

    NOTE(review): the base-class name ``lowercase__`` is not defined in this
    module — upstream this inherits from ``PretrainedConfig`` (imported
    above); verify before use.

    Fix: the original ``__init__`` declared every parameter with the identical
    name ``__lowercase`` (a SyntaxError) and bound each argument to a
    throwaway local, so no hyper-parameter ever reached the instance.
    """

    # Upstream this class attribute is ``model_type``.
    a = 'timesformer'

    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        """Store the model hyper-parameters; unknown keyword arguments are
        forwarded to the base configuration class."""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast pipeline tests for ``KandinskyVaaInpaintPipeline`` with tiny dummy
    components (small UNet + VQ movq model, DDIM scheduler).

    NOTE(review): every test/property below is named ``UpperCAmelCase`` in
    this transformed copy (later definitions shadow earlier ones), and many
    statements bind to the throwaway name ``__UpperCAmelCase`` while later
    lines read the intended locals; verify against the original test module.
    """

    # Pipeline under test plus the parameter lists the mixin checks.
    a : int = KandinskyVaaInpaintPipeline
    a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    a : Any = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    a : Any = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    a : List[Any] = False

    # Dummy-model dimensions used to build the tiny test components.
    @property
    def UpperCAmelCase ( self : int ) -> Dict:
        return 32

    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        return 32

    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        return self.time_input_dim

    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        return 100

    # Tiny conditioned UNet (9 in-channels: latents + masked image + mask).
    @property
    def UpperCAmelCase ( self : Dict ) -> Any:
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
        return model

    # Keyword arguments for the tiny VQ image codec.
    @property
    def UpperCAmelCase ( self : int ) -> int:
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        torch.manual_seed(0 )
        __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    # Assemble the pipeline components (unet + scheduler + movq).
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        __UpperCAmelCase : List[str] = self.dummy_unet
        __UpperCAmelCase : List[str] = self.dummy_movq
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
        __UpperCAmelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    # Deterministic dummy inputs: random embeds, 64x64 image, all-ones mask
    # with one zeroed pixel.
    def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
        __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowercase )
        # create init_image
        __UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
        __UpperCAmelCase : List[str] = 0
        if str(__lowercase ).startswith("""mps""" ):
            __UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
        else:
            __UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __UpperCAmelCase : Optional[Any] = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    # End-to-end CPU run: output shape and a frozen 3x3 pixel slice.
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        __UpperCAmelCase : Optional[Any] = """cpu"""
        __UpperCAmelCase : Dict = self.get_dummy_components()
        __UpperCAmelCase : str = self.pipeline_class(**__lowercase )
        __UpperCAmelCase : Tuple = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
        __UpperCAmelCase : Tuple = output.images
        __UpperCAmelCase : Optional[int] = pipe(
            **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
        __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase : Optional[Any] = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky 2.2 prior + inpaint pipeline
    run compared against a stored reference image.

    NOTE(review): as elsewhere in this transformed copy, intermediates are
    bound to ``__UpperCAmelCase`` while later lines read the intended locals;
    verify against the original test module.
    """

    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Reference output and input image fetched from the Hub datasets.
        __UpperCAmelCase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __UpperCAmelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Mask everything except one pixel; prompt asks to paint a hat.
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
        __UpperCAmelCase : Optional[Any] = 0
        __UpperCAmelCase : Tuple = """a hat"""
        __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __UpperCAmelCase : int = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        # Prior produces the image embeddings consumed by the decoder.
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
            __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __UpperCAmelCase : Optional[int] = pipeline(
            image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __UpperCAmelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
    # Intentionally convert some words to accommodate character fluctuations unique to Japanese
    # NOTE(review): deliberate no-op override — presumably skips an inherited
    # round-trip check that is invalid for Japanese character variants; confirm
    # against the base test class.
    pass
def UpperCAmelCase ( self : Any ) -> int:
    # tokenizer has no padding token
    # Deliberate no-op: the inherited padding test cannot run for this tokenizer.
    pass
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Optional torch import: downstream helpers check `_torch_available` before use.
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    # Fall back to the conventional XDG cache location when torch is absent.
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory containing this module; config/label files are resolved relative to it.
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def lowerCamelCase__(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read the Visual-Genome class and attribute label files.

    Each file holds one label per line; only the text before the first comma
    is kept, lower-cased and stripped.

    Returns:
        (vg_classes, vg_attrs): two lists of label strings.
    """
    vg_classes = []
    with open(objs) as f:
        for line in f.readlines():
            vg_classes.append(line.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def lowerCamelCase__(checkpoint_path):
    """Load a pickled detectron-style checkpoint and convert every entry of its
    ``"model"`` dict to a ``torch.Tensor``.

    Raises AssertionError if an entry is neither ``np.ndarray`` nor tensor.
    """
    r = OrderedDict()
    with open(checkpoint_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    # Iterate over a snapshot of the keys because entries are popped in place.
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # isinstance against torch.Tensor (the class); the original checked
            # torch.tensor (the factory function), which always raises.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """Nested attribute-style view over a (possibly nested) configuration dict.

    Nested dicts become child ``Config`` objects, so values read naturally as
    ``cfg.model.hidden_size``. Dotted keys passed to ``setattr`` walk the
    nesting. ``from_pretrained`` loads a YAML config from a path, URL or the
    model hub cache.
    """

    # Class-level default; replaced per instance at the end of __init__.
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                # Recurse: nested dicts become nested Config objects.
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        # Also expose the last component of a dotted key directly.
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    # Delegate the remaining dotted path to the child Config.
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            # NOTE(review): config_file is deliberately rebound to the parsed
            # YAML here, so the comparison below effectively always reports
            # "cache" — preserved from the original flow; confirm intent.
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def lowerCamelCase__(in_tensor):
    """Debug helper: compare ``in_tensor`` against the tensor saved in ``dump.pt``.

    Prints both shapes/samples, asserts element-wise closeness, then raises an
    Exception with a success message — the raise is the (quirky) intended way
    to halt execution after the check.
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    # Use `not x` instead of `x is False`: np.isclose yields numpy bools, for
    # which identity comparison with False is always false.
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if not x]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def lowerCamelCase__(url_or_filename):
    """Return True when the argument is an http(s) URL rather than a local path."""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def lowerCamelCase__(model_id, filename, use_cdn=True):
    """Build the download URL for ``filename`` of model ``model_id``.

    Legacy (un-namespaced) model ids use ``{endpoint}/{id}-{filename}``;
    namespaced ids use ``{endpoint}/{id}/{filename}``. The original source
    had both ``{filename}`` placeholders garbled to "(unknown)".
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def lowerCamelCase__(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream ``url`` into the open file object ``temp_file`` with a tqdm bar.

    ``resume_size`` resumes a partial download via an HTTP Range header;
    ``user_agent`` (str or dict) is appended to the default UA string.
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowerCamelCase__(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Download ``url`` into the cache (or reuse the cached copy) and return its path.

    Returns None when offline and no cached candidate exists; raises ValueError
    when ``local_files_only`` is set and nothing is cached.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # Probe the ETag so distinct versions of the same URL get distinct cache files.
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def lowerCamelCase__(url, etag=None):
    """Map a URL (plus optional ETag) to a deterministic cache filename.

    The name is sha256(url), with ".{sha256(etag)}" appended when an ETag is
    given, and the ".h5" suffix preserved so downstream loaders can sniff it.
    """
    # Local import: the module-level `from hashlib import shaaaa` is garbled
    # (hashlib has no such member); sha256 is the intended hash.
    from hashlib import sha256

    url_bytes = url.encode("utf-8")
    filename = sha256(url_bytes).hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        filename += "." + sha256(etag_bytes).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCamelCase__(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve ``url_or_filename`` to a local path, downloading/caching remote
    URLs and optionally extracting zip/tar archives next to the cached file.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def lowerCamelCase__(query, delim=","):
    """Fetch data from a local file path or a URL.

    Local files and non-JSON responses are parsed with eval(), falling back to
    a line split. ``delim`` is accepted for interface compatibility but unused.

    SECURITY NOTE(review): eval() on file/network content executes arbitrary
    code — only use with trusted sources; consider ast.literal_eval instead.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # Fixed: the original called requests.json() (module has no such
            # attribute); the response object is what carries .json().
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def lowerCamelCase__(url):
    """Download an image from ``url`` and return it as a numpy array (via PIL)."""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def lowerCamelCase__(url):
    """Download (if needed) a pickled Faster-RCNN checkpoint and convert its
    ``"model"`` weights to torch tensors.

    For every ``running_var`` buffer a zero ``num_batches_tracked`` companion
    is added, matching torch BatchNorm state-dict layout.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def lowerCamelCase__():
    """Print the absolute path of the demo notebook next to this package."""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowerCamelCase__(im, input_format="RGB"):
    """Load an image from a path or URL and return it as a numpy array.

    ``cva`` is this module's OpenCV import (presumably cv2 — the import line
    is garbled); images are converted BGR→RGB and then channel-reversed again
    when ``input_format == "RGB"``, i.e. effectively returned BGR-ordered.
    """
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__(images, batch=1):
    """Yield successive ``batch``-sized slices of ``images`` (last may be short)."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 63 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; referenced by the config classes below.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    Defaults mirror the microsoft/git-base vision tower.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for a GIT (GenerativeImage2Text) model.

    Combines a ``GitVisionConfig`` for the image tower with text-decoder
    hyperparameters; defaults mirror microsoft/git-base.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds small RobertaConfig + random inputs for the Flax Roberta tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus an encoder-style (non-decoder) config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Shape the inputs as the common-tester ``inputs_dict``."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same inputs but with a decoder config and encoder cross-attn tensors."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax Roberta model tests driven by the common FlaxModelTesterMixin."""

    # NOTE(review): reconstructed flag name — the degraded source only shows a
    # bare boolean class attribute; confirm against the upstream test file.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 1 |
from __future__ import annotations
import math
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool , __lowerCamelCase : list[int] , __lowerCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
def lowerCamelCase__():
    """Demo driver: run minimax on a fixed 8-leaf tree and print the result."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is not defined in this file — the driver above is
    # named `lowerCamelCase__`; this call raises NameError as written. Confirm
    # the intended entry-point name.
    main()
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def lowerCamelCase__(optimizer: Optimizer, last_epoch: int = -1):
    """Constant learning rate: a LambdaLR whose multiplier is always 1."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def lowerCamelCase__(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant LR after a linear warmup from 0 over ``num_warmup_steps`` steps."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear ramp 0 → 1 during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def lowerCamelCase__(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR multiplier from a rule string.

    ``step_rules`` looks like ``"10:0.5,20:0.1,0.01"``: before step 10 the
    multiplier is 0.5, before 20 it is 0.1, afterwards the trailing value.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Bind the rules into a closure usable by LambdaLR.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def lowerCamelCase__(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay, clamped at 0 once training steps are exhausted.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay over ``num_cycles`` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 ):
    """Linear warmup followed by a cosine schedule with hard restarts
    (``num_cycles`` full cosine cycles, each restarting at the peak LR).

    NOTE(review): fuzzed names — the body reads ``current_step``,
    ``num_warmup_steps``, ``num_training_steps`` and ``progress``, which are
    unresolved as written; ``math`` is not imported in this chunk.
    """
    def lr_lambda(__lowerCamelCase : Union[str, Any] ):
        if current_step < num_warmup_steps:
            return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
        __UpperCAmelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine at the start of every cycle.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCamelCase ) * progress) % 1.0) )) )
    return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=1E-7 , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : int=-1 ):
    """Linear warmup followed by polynomial decay from the optimizer's initial
    LR down to ``lr_end``.

    NOTE(review): fuzzed names — the body reads ``lr_init``, ``lr_end``,
    ``current_step``, ``num_warmup_steps``, ``num_training_steps``,
    ``decay_steps``, ``lr_range``, ``pct_remaining``, ``power`` and ``decay``,
    none of which are bound under the renamed parameters as written.
    """
    __UpperCAmelCase : Tuple = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
    def lr_lambda(__lowerCamelCase : int ):
        if current_step < num_warmup_steps:
            return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            # Polynomial interpolation between lr_init and lr_end.
            __UpperCAmelCase : Optional[Any] = lr_init - lr_end
            __UpperCAmelCase : Union[str, Any] = num_training_steps - num_warmup_steps
            __UpperCAmelCase : int = 1 - (current_step - num_warmup_steps) / decay_steps
            __UpperCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Maps each SchedulerType member to its factory function.
# NOTE(review): the renaming pass assigned this dict to `a`, yet the
# dispatcher below reads `TYPE_TO_SCHEDULER_FUNCTION` — the original name.
a : int = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( __lowerCamelCase : Union[str, SchedulerType] , __lowerCamelCase : Optimizer , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : int = -1 , ):
    """Unified scheduler factory: resolve a ``SchedulerType`` and call the
    matching constructor, validating which arguments each type requires.

    NOTE(review): fuzzed names — the body reads ``name``,
    ``num_warmup_steps`` and ``num_training_steps`` which are not bound under
    the renamed parameters, and looks up ``TYPE_TO_SCHEDULER_FUNCTION`` which
    was assigned to ``a`` above. Unresolved as written.
    """
    __UpperCAmelCase : Union[str, Any] = SchedulerType(__lowerCamelCase )
    __UpperCAmelCase : int = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(__lowerCamelCase , last_epoch=__lowerCamelCase )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(__lowerCamelCase , step_rules=__lowerCamelCase , last_epoch=__lowerCamelCase )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(__lowerCamelCase , num_warmup_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , num_cycles=__lowerCamelCase , last_epoch=__lowerCamelCase , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , power=__lowerCamelCase , last_epoch=__lowerCamelCase , )
    return schedule_func(
        __lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
| 63 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Module-level logger setup for the DPT checkpoint conversion script below.
logging.set_verbosity_info()
a : Any = logging.get_logger(__name__)
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Derive a ``DPTConfig`` (and the expected output shape) from substrings
    of the checkpoint URL: "large" selects the ViT-L backbone, "ade" selects
    the ADE20K semantic-segmentation head.

    NOTE(review): fuzzed names — the body reads ``checkpoint_url`` and
    ``idalabel`` which are not bound under the renamed assignments, and the
    obfuscated ``__UpperCAmelCase`` targets were originally config attributes
    (hidden_size, id2label, ...). Unresolved as written.
    """
    __UpperCAmelCase : Optional[Any] = DPTConfig()
    if "large" in checkpoint_url:
        # ViT-Large backbone hyperparameters.
        __UpperCAmelCase : int = 1024
        __UpperCAmelCase : Union[str, Any] = 4096
        __UpperCAmelCase : Any = 24
        __UpperCAmelCase : int = 16
        __UpperCAmelCase : str = [5, 11, 17, 23]
        __UpperCAmelCase : List[Any] = [256, 512, 1024, 1024]
        __UpperCAmelCase : Optional[Any] = (1, 384, 384)
    if "ade" in checkpoint_url:
        # ADE20K segmentation variant: 150 classes with hub-hosted label map.
        __UpperCAmelCase : List[str] = True
        __UpperCAmelCase : List[Any] = 150
        __UpperCAmelCase : Dict = """huggingface/label-files"""
        __UpperCAmelCase : Dict = """ade20k-id2label.json"""
        __UpperCAmelCase : Tuple = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
        __UpperCAmelCase : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
        __UpperCAmelCase : str = idalabel
        __UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
        __UpperCAmelCase : int = [1, 150, 480, 480]
    return config, expected_shape
def lowerCamelCase__ ( __lowerCamelCase : Tuple ):
    """Drop the original classification-head weights from a state dict in
    place — presumably ``state_dict.pop(k, None)`` upstream; the fuzzing pass
    replaced both pop arguments with the parameter placeholder. TODO confirm.
    """
    __UpperCAmelCase : Dict = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
    """Translate one original-DPT state-dict key into the HuggingFace
    naming scheme via chained substring replacements.

    NOTE(review): fuzzed names — every replacement result is assigned to an
    obfuscated ``__UpperCAmelCase`` target instead of back into ``name``, so
    as written the replacements never compose and ``name`` is unbound.
    In the upstream script each line is ``name = name.replace(...)``.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        __UpperCAmelCase : str = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        __UpperCAmelCase : int = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        __UpperCAmelCase : List[str] = name.replace("""patch_embed""" , """patch_embeddings""" )
    if "pos_embed" in name:
        __UpperCAmelCase : str = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        __UpperCAmelCase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        __UpperCAmelCase : Dict = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        __UpperCAmelCase : Tuple = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        __UpperCAmelCase : int = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        __UpperCAmelCase : Tuple = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name:
        __UpperCAmelCase : Union[str, Any] = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        __UpperCAmelCase : int = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        __UpperCAmelCase : str = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        __UpperCAmelCase : str = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        __UpperCAmelCase : Optional[int] = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        __UpperCAmelCase : Any = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        __UpperCAmelCase : Any = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        __UpperCAmelCase : Dict = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        __UpperCAmelCase : Dict = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        __UpperCAmelCase : List[Any] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        __UpperCAmelCase : Optional[Any] = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        __UpperCAmelCase : Dict = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        __UpperCAmelCase : Dict = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        __UpperCAmelCase : int = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        __UpperCAmelCase : Tuple = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        __UpperCAmelCase : int = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        __UpperCAmelCase : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        __UpperCAmelCase : Union[str, Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        __UpperCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        __UpperCAmelCase : Dict = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        __UpperCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        __UpperCAmelCase : Dict = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        __UpperCAmelCase : Any = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        __UpperCAmelCase : Dict = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        __UpperCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        __UpperCAmelCase : Any = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        __UpperCAmelCase : List[str] = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        __UpperCAmelCase : Tuple = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        __UpperCAmelCase : List[Any] = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        __UpperCAmelCase : Tuple = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        __UpperCAmelCase : int = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    return name
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str ):
    """Split each fused timm ``qkv`` projection into separate query/key/value
    entries of the state dict, layer by layer.

    NOTE(review): fuzzed names — the popped tensors should land in
    ``in_proj_weight``/``in_proj_bias`` and the slices should be assigned to
    the q/k/v state-dict keys; here every target is an obfuscated local, so
    the splits are computed but never stored. ``config``/``state_dict``
    are also unbound under the renamed parameters.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __UpperCAmelCase : str = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        __UpperCAmelCase : int = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        __UpperCAmelCase : List[Any] = in_proj_weight[: config.hidden_size, :]
        __UpperCAmelCase : int = in_proj_bias[: config.hidden_size]
        __UpperCAmelCase : str = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __UpperCAmelCase : Tuple = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __UpperCAmelCase : Optional[Any] = in_proj_weight[
            -config.hidden_size :, :
        ]
        __UpperCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
    """Download the standard COCO test image (two cats) used to sanity-check
    converted vision models. Performs network I/O via ``requests``."""
    __UpperCAmelCase : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # NOTE(review): the fuzzed target was originally `url`; as written `im`
    # in the return statement is unbound.
    __UpperCAmelCase : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
    return im
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int ):
    """End-to-end DPT checkpoint conversion: download the original weights,
    rename/split keys, load into a HF model, verify the outputs against
    reference logits, save locally and optionally push to the hub.

    NOTE(review): fuzzed names throughout — the body reads ``config``,
    ``state_dict``, ``checkpoint_url``, ``val``, ``model``, ``size``,
    ``image_processor``, ``image``, ``encoding``, ``outputs``,
    ``expected_slice`` and ``pytorch_dump_folder_path``, none of which are
    bound under the renamed assignments/parameters. Unresolved as written.
    """
    __UpperCAmelCase , __UpperCAmelCase : Dict = get_dpt_config(__lowerCamelCase )
    # load original state_dict from URL
    __UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(__lowerCamelCase )
    # rename keys
    for key in state_dict.copy().keys():
        __UpperCAmelCase : List[Any] = state_dict.pop(__lowerCamelCase )
        __UpperCAmelCase : List[Any] = val
    # read in qkv matrices
    read_in_q_k_v(__lowerCamelCase , __lowerCamelCase )
    # load HuggingFace model
    __UpperCAmelCase : Optional[int] = DPTForSemanticSegmentation(__lowerCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # Check outputs on an image
    __UpperCAmelCase : Dict = 480 if """ade""" in checkpoint_url else 384
    __UpperCAmelCase : List[str] = DPTImageProcessor(size=__lowerCamelCase )
    __UpperCAmelCase : str = prepare_img()
    __UpperCAmelCase : str = image_processor(__lowerCamelCase , return_tensors="""pt""" )
    # forward pass
    __UpperCAmelCase : int = model(**__lowerCamelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
    # Assert logits
    __UpperCAmelCase : List[Any] = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] )
    if "ade" in checkpoint_url:
        __UpperCAmelCase : Union[str, Any] = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] )
    assert outputs.shape == torch.Size(__lowerCamelCase )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , __lowerCamelCase )
    )
    Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__lowerCamelCase )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(__lowerCamelCase , __lowerCamelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=__lowerCamelCase , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(__lowerCamelCase , __lowerCamelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=__lowerCamelCase , )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    a : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    # NOTE(review): the parser is assigned to `a` (renaming artifact) while the
    # calls below read `parser`/`args` — the original, now-unbound names.
    a : Any = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 63 |
from math import pi, sqrt
def lowerCamelCase__ ( num : float ):
    """Return the Gamma function of *num* for positive integers and
    half-integers, via the recurrence Γ(x) = (x - 1)·Γ(x - 1) with base
    cases Γ(1) = 1 and Γ(1/2) = √π.

    Raises:
        ValueError: if ``num <= 0`` (Gamma is undefined / poles).
        OverflowError: if ``num > 171.5`` (result exceeds float range).
        NotImplementedError: if ``num`` is not an integer or half-integer.

    Fixes over the previous revision: the parameter was renamed away from
    ``num`` while the body still read it (NameError); the half-integer base
    case returned ``sqrt(num)`` instead of ``sqrt(pi)``; and the recursion
    called an undefined name instead of this function.
    """
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 1_7_1.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        # Base case for half-integers: gamma(1/2) == sqrt(pi).
        return sqrt(pi )
    else:
        # gamma(1) == 1; otherwise apply the recurrence gamma(x) = (x-1)*gamma(x-1).
        return 1.0 if num == 1 else (num - 1) * lowerCamelCase__(num - 1 )
def lowerCamelCase__ ( ):
    """Smoke tests for the gamma function above.

    NOTE(review): `gamma` is the function's pre-rename identifier and is
    unbound here, and the first assertion's `sqrt(__lowerCamelCase)` was
    presumably `sqrt(pi)` before the renaming pass — verify upstream.
    """
    assert gamma(0.5 ) == sqrt(__lowerCamelCase )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    # Run doctests, then an interactive loop: evaluate gamma of user input
    # until 0 (falsy) is entered.
    from doctest import testmod
    testmod()
    # NOTE(review): both assignments were renamed to `a` while the loop reads
    # `num` — the original, now-unbound name.
    a : Optional[int] = 1.0
    while num:
        a : List[str] = float(input("Gamma of: "))
        print(f"""gamma({num}) = {gamma(num)}""")
        print("\nEnter 0 to exit...")
| 63 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( nums : list[float] ) -> bool:
    """Return True if the side lengths in *nums* can form a polygon, i.e.
    the longest side is strictly shorter than the sum of the others
    (generalized triangle inequality).

    Args:
        nums: candidate side lengths; must contain at least 3 values
              (fewer than 3 sides is rejected), all strictly positive.

    Raises:
        ValueError: if fewer than 3 sides are given, or any side is <= 0.

    Fix over the previous revision: the parameter had been renamed away from
    ``nums`` while the body still read it, raising NameError on every call.
    """
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    # Sort a copy so the input list is not mutated; compare the largest side
    # against the sum of all the others.
    copy_nums = sorted(nums )
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a :
    """simple docstring"""

    # NOTE(review): the renaming pass collapsed the class name and all three
    # field names to `a`; as a dataclass the later annotations simply
    # re-declare the same field, so only one field survives. Upstream this is
    # a binary-tree Node with fields `data`, `left`, `right` — TODO restore.
    a : int
    a : Node | None = None
    a : Node | None = None
def lowerCamelCase__ ( ):
    """Build the small fixed binary tree used by the traversal demos below.

    NOTE(review): the fuzzed assignments were originally wiring nodes
    together (`tree = Node(1)`, `tree.left = Node(2)`, ...); as written the
    five nodes are discarded and `tree` in the return is unbound.
    """
    __UpperCAmelCase : Tuple = Node(1 )
    __UpperCAmelCase : int = Node(2 )
    __UpperCAmelCase : Optional[Any] = Node(3 )
    __UpperCAmelCase : Dict = Node(4 )
    __UpperCAmelCase : Tuple = Node(5 )
    return tree
def lowerCamelCase__ ( root : Node | None ):
    """Return the pre-order traversal (node, left subtree, right subtree) of
    the tree rooted at *root* as a flat list; empty list for an empty tree.

    Fix over the previous revision: the parameter had been renamed away from
    ``root`` and the recursion targeted the pre-rename name ``preorder`` —
    both NameErrors; the body now reads its own parameter and recurses on
    itself.
    """
    return [root.data, *lowerCamelCase__(root.left ), *lowerCamelCase__(root.right )] if root else []
def lowerCamelCase__ ( root : Node | None ):
    """Return the post-order traversal (left subtree, right subtree, node) of
    the tree rooted at *root* as a flat list; empty list for an empty tree.

    Fix over the previous revision: the parameter had been renamed away from
    ``root`` and the recursion targeted the pre-rename name ``postorder`` —
    both NameErrors; the body now reads its own parameter and recurses on
    itself.
    """
    return lowerCamelCase__(root.left ) + lowerCamelCase__(root.right ) + [root.data] if root else []
def lowerCamelCase__ ( root : Node | None ):
    """Return the in-order traversal (left subtree, node, right subtree) of
    the tree rooted at *root* as a flat list; empty list for an empty tree.

    Fix over the previous revision: the parameter had been renamed away from
    ``root`` and the recursion targeted the pre-rename name ``inorder`` —
    both NameErrors; the body now reads its own parameter and recurses on
    itself.
    """
    return [*lowerCamelCase__(root.left ), root.data, *lowerCamelCase__(root.right )] if root else []
def lowerCamelCase__ ( root : Node | None ):
    """Return the height of the tree rooted at *root* (number of levels);
    0 for an empty tree.

    Fix over the previous revision: the parameter had been renamed away from
    ``root`` and the recursion targeted the pre-rename name ``height`` —
    both NameErrors; the body now reads its own parameter and recurses on
    itself.
    """
    return (max(lowerCamelCase__(root.left ) , lowerCamelCase__(root.right ) ) + 1) if root else 0
def lowerCamelCase__ ( root : Node | None ):
    """Return the breadth-first (level-order) traversal of the tree rooted at
    *root* as a flat list; empty list for an empty tree.

    Uses a deque as the FIFO frontier so popleft is O(1).

    Fix over the previous revision: locals had been renamed to obfuscated
    placeholders while the body read the original names (``process_queue``,
    ``node``) — NameErrors on every call; the coherent names are restored.
    """
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        # Enqueue children left-to-right so each level is emitted in order.
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def lowerCamelCase__ ( root : Node | None , level : int ):
    """Return the node values at the given 1-based *level* of the tree,
    collected left to right; empty list if the level is empty.

    Fix over the previous revision: the parameters had been renamed away
    from ``root``/``level`` while the body still read them — NameErrors on
    every call; the coherent names are restored.
    """
    output = []
    def populate_output(root : Node | None , level : int ) -> None:
        # Depth-first descent: visit left before right so level values come
        # out in left-to-right order.
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def lowerCamelCase__ ( root : Node | None , level : int ):
    """Return the node values at the given 1-based *level* of the tree,
    collected right to left; empty list if the level is empty.

    Fix over the previous revision: the parameters had been renamed away
    from ``root``/``level`` while the body still read them — NameErrors on
    every call; the coherent names are restored.
    """
    output = []
    def populate_output(root : Node | None , level : int ) -> None:
        # Depth-first descent: visit right before left so level values come
        # out in right-to-left order.
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def lowerCamelCase__ ( root : Node | None ):
    """Return the zig-zag level-order traversal of the tree rooted at *root*:
    a list of per-level lists, alternating left-to-right (level 1) and
    right-to-left (level 2), and so on. Empty list for an empty tree.

    NOTE(review): the previous revision called sibling helpers (``height``,
    ``get_nodes_from_left_to_right``, ``get_nodes_from_right_to_left``) whose
    definitions were destroyed by a renaming pass; equivalent private helpers
    are defined locally so this function is self-contained and correct.
    """
    if root is None:
        return []
    def _height(node : Node | None ) -> int:
        # Number of levels below (and including) `node`.
        return (max(_height(node.left ) , _height(node.right ) ) + 1) if node else 0
    def _collect(node : Node | None , level : int , out : list , left_first : bool ) -> None:
        # Gather values at `level`, visiting children in the requested order.
        if node is None:
            return
        if level == 1:
            out.append(node.data )
        elif level > 1:
            first, second = (node.left, node.right) if left_first else (node.right, node.left)
            _collect(first , level - 1 , out , left_first )
            _collect(second , level - 1 , out , left_first )
    output = []
    left_to_right = True
    for h in range(1 , _height(root ) + 1 ):
        row = []
        _collect(root , h , row , left_to_right )
        output.append(row )
        left_to_right = not left_to_right  # alternate direction each level
    return output
def lowerCamelCase__ ( ):  # Main function for testing.
    """Demo driver: build the sample tree and print every traversal.

    NOTE(review): all callees (`make_tree`, `inorder`, `preorder`,
    `postorder`, `height`, `level_order`, `get_nodes_from_left_to_right`,
    `zigzag`) are the pre-rename identifiers of the functions above, which
    the fuzzing pass renamed to `lowerCamelCase__` — unresolved as written.
    """
    __UpperCAmelCase : List[Any] = make_tree()
    print(f"""In-order Traversal: {inorder(__lowerCamelCase )}""" )
    print(f"""Pre-order Traversal: {preorder(__lowerCamelCase )}""" )
    print(f"""Post-order Traversal: {postorder(__lowerCamelCase )}""" , """\n""" )
    print(f"""Height of Tree: {height(__lowerCamelCase )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(__lowerCamelCase ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(__lowerCamelCase ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(__lowerCamelCase , level=__lowerCamelCase ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(__lowerCamelCase ) )
if __name__ == "__main__":
    # Run doctests, then the demo driver.
    import doctest
    doctest.testmod()
    main()
| 63 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a :
    """simple docstring"""

    # NOTE(review): this is an obfuscated copy of a TFEfficientFormer model
    # tester. In __init__ every `self.<attr> = <arg>` assignment was rewritten
    # to a local `__UpperCAmelCase : ... = <arg>`, so no instance attributes
    # are ever set and the `self.*` reads in the other methods fail as
    # written. The comments below describe the apparent upstream intent.

    def __init__( self : Any , __lowercase : Dict , __lowercase : int = 13 , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 3 , __lowercase : int = 3 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : int = 128 , __lowercase : List[str]=[16, 32, 64, 128] , __lowercase : int = 7 , __lowercase : int = 4 , __lowercase : int = 37 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 10 , __lowercase : float = 0.02 , __lowercase : int = 2 , __lowercase : int = 1 , __lowercase : int = 128 , __lowercase : List[int] = [2, 2, 2, 2] , __lowercase : int = 2 , __lowercase : int = 2 , ) -> int:
        # Store test-harness hyperparameters (batch size, image size, model
        # dims, ...) — see class NOTE: targets should be `self.<attr>`.
        __UpperCAmelCase : Optional[int] = parent
        __UpperCAmelCase : Tuple = batch_size
        __UpperCAmelCase : int = image_size
        __UpperCAmelCase : Optional[int] = patch_size
        __UpperCAmelCase : Any = num_channels
        __UpperCAmelCase : List[str] = is_training
        __UpperCAmelCase : Optional[Any] = use_labels
        __UpperCAmelCase : List[Any] = hidden_size
        __UpperCAmelCase : Optional[int] = num_hidden_layers
        __UpperCAmelCase : Union[str, Any] = num_attention_heads
        __UpperCAmelCase : Union[str, Any] = intermediate_size
        __UpperCAmelCase : List[Any] = hidden_act
        __UpperCAmelCase : Any = hidden_dropout_prob
        __UpperCAmelCase : Dict = attention_probs_dropout_prob
        __UpperCAmelCase : Union[str, Any] = type_sequence_label_size
        __UpperCAmelCase : Dict = initializer_range
        __UpperCAmelCase : str = encoder_stride
        __UpperCAmelCase : List[Any] = num_attention_outputs
        __UpperCAmelCase : str = embed_dim
        __UpperCAmelCase : Tuple = embed_dim + 1
        __UpperCAmelCase : Tuple = resolution
        __UpperCAmelCase : Union[str, Any] = depths
        __UpperCAmelCase : List[Any] = hidden_sizes
        __UpperCAmelCase : Optional[Any] = dim
        __UpperCAmelCase : Union[str, Any] = mlp_expansion_ratio

    def UpperCAmelCase ( self : str ) -> int:
        # Build random pixel inputs (and optional labels) plus a config.
        __UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCAmelCase : int = None
        if self.use_labels:
            __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __UpperCAmelCase : Any = self.get_config()
        return config, pixel_values, labels

    def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
        # Materialize an EfficientFormerConfig from the stored hyperparameters.
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def UpperCAmelCase ( self : Optional[int] , __lowercase : int , __lowercase : List[str] , __lowercase : Optional[Any] ) -> Tuple:
        # Forward the bare model and check the hidden-state shape.
        __UpperCAmelCase : Tuple = TFEfficientFormerModel(config=__lowercase )
        __UpperCAmelCase : int = model(__lowercase , training=__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : Any ) -> Optional[int]:
        # Forward the classification head and check logits shape, including a
        # single-channel (greyscale) variant.
        __UpperCAmelCase : Tuple = self.type_sequence_label_size
        __UpperCAmelCase : Union[str, Any] = TFEfficientFormerForImageClassification(__lowercase )
        __UpperCAmelCase : List[str] = model(__lowercase , labels=__lowercase , training=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __UpperCAmelCase : Dict = 1
        __UpperCAmelCase : Optional[Any] = TFEfficientFormerForImageClassification(__lowercase )
        __UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __UpperCAmelCase : str = model(__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCAmelCase ( self : Optional[Any] ) -> int:
        # Repackage prepared inputs into the kwargs dict used by common tests.
        __UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Dict = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a : int = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a : str = False
a : Union[str, Any] = False
a : Union[str, Any] = False
a : str = False
a : int = False
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase : int = TFEfficientFormerModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def UpperCAmelCase ( self : Any ) -> str:
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def UpperCAmelCase ( self : Dict ) -> Dict:
pass
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(__lowercase )
__UpperCAmelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
def check_hidden_states_output(__lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Optional[int] ):
__UpperCAmelCase : Optional[int] = model_class(__lowercase )
__UpperCAmelCase : Any = model(**self._prepare_for_class(__lowercase , __lowercase ) , training=__lowercase )
__UpperCAmelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : int = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowercase ) , __lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__UpperCAmelCase : List[str] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__UpperCAmelCase : Any = seq_length * self.model_tester.chunk_length
else:
__UpperCAmelCase : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__UpperCAmelCase : str = outputs.decoder_hidden_states
self.asseretIsInstance(__lowercase , (list, tuple) )
self.assertEqual(len(__lowercase ) , __lowercase )
__UpperCAmelCase : Dict = getattr(self.model_tester , """seq_length""" , __lowercase )
__UpperCAmelCase : str = getattr(self.model_tester , """decoder_seq_length""" , __lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCAmelCase ( self : str , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any]=False ) -> Dict:
__UpperCAmelCase : Optional[Any] = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase ( self : str ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def UpperCAmelCase ( self : str ) -> Any:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowercase )
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : int ) -> int:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = TFEfficientFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[Any] = getattr(self.model_tester , """seq_length""" , __lowercase )
__UpperCAmelCase : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , __lowercase )
__UpperCAmelCase : Any = getattr(self.model_tester , """key_length""" , __lowercase )
__UpperCAmelCase : Optional[Any] = getattr(self.model_tester , """chunk_length""" , __lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__UpperCAmelCase : Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = model_class(__lowercase )
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowercase , __lowercase ) , training=__lowercase )
__UpperCAmelCase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = model_class(__lowercase )
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowercase , __lowercase ) , training=__lowercase )
__UpperCAmelCase : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCAmelCase ( self : List[Any] ) -> Any:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__UpperCAmelCase : int = model_class(__lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__UpperCAmelCase : Tuple = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__UpperCAmelCase : str = model(__lowercase )
self.assertTrue(outputs_dict is not None )
def prepare_img ( ):
    """Load the standard COCO cats fixture image used by the integration tests.

    Renamed back from the mangled `lowerCamelCase__`: the tests below call
    `prepare_img()` by this name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
    """Integration tests that run real EfficientFormer-L1 checkpoints on the
    COCO fixture image and compare logits against recorded reference values."""

    @cached_property
    def default_image_processor ( self ):
        # Renamed from the mangled `UpperCAmelCase`: the tests below read
        # `self.default_image_processor`, and the old name was immediately
        # shadowed by the later method definitions anyway.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head ( self ):
        """Plain classification head: verify logits shape and first values."""
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf")
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher ( self ):
        """Distillation ("with teacher") head: same checks, different values."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf")
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
# | 63 |  (non-Python table residue from dataset concatenation; file boundary)
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """Tests for `GPTSanJapaneseTokenizer`: vocab fixtures, bag-of-token
    expansion, prefix-text handling, token-type ids and batch encoding."""

    # Restored attribute names: the tests below read `self.tokenizer_class`;
    # the other two follow the TokenizerTesterMixin contract — verify against
    # the mixin if in doubt.
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp ( self ):
        # Must be `setUp` (it chains to `super().setUp()`) so unittest runs it
        # before each test and the fixture files below exist.
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file , "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer ( self , **kwargs ):
        # Read by the tests below as `self.get_tokenizer()`.
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs)

    def get_input_output_texts ( self , tokenizer ):
        # Raw input and its expected normalised decode (㔺 -> 世).
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence ( self , tokenizer ):
        """Return (decoded_text, ids) for the canonical sample sentence."""
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text , add_special_tokens=False)
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs ( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input ( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input ( self ):
        pass  # TODO add if relevant

    def test_full_tokenizer ( self ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens , expected_tokens)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids , expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids , expected_ids)

    def test_token_bagging ( self ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization: <|bagoftoken|> expands to repeated previous token.
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text , expected_text)

    @slow
    def test_prefix_input ( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # The three encode spellings below must decode to the same text.
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("" , prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text , prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1 , expected_text)
        self.assertEqual(output_text_2 , expected_text)
        self.assertEqual(output_text_3 , expected_text)

    @slow
    def test_token_type_ids ( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Token-type ids mark the prefix segment; -2 drops the two specials.
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("" , prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text , prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1 , expected_mask_1)
        self.assertListEqual(type_ids_2 , expected_mask_2)
        self.assertListEqual(type_ids_3 , expected_mask_3)

    @slow
    def test_prefix_token ( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("" , prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ" , prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1) , tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1) , tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1 , x_token_2)
        self.assertNotEqual(x_token_1 , x_token_3)
        self.assertEqual(x_token_2[1] , x_token_2[-1])  # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3])  # SEG token

    @slow
    def test_batch_encode ( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs , padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs)
        self.assertListEqual(x_token.token_type_ids , expected_typeids)
        self.assertListEqual(x_token.attention_mask , expected_attmask)
        self.assertListEqual(x_token_2.input_ids , expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids , expected_typeids)
        self.assertListEqual(x_token_2.attention_mask , expected_attmask)

    def test_conversion_reversible ( self ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name ( self ):
        # tokenizer has no padding token
        pass
# | 63 | 1 |  (non-Python table residue from dataset concatenation; file boundary)
from __future__ import annotations
import queue
class TreeNode :
    """A binary-tree node: an integer payload plus optional left/right children.

    Renamed back from the mangled `a`: every traversal helper in this file
    reads `TreeNode` (annotations, `isinstance`, construction in build_tree).
    The mangled `__init__` assigned throwaway locals instead of attributes,
    so instances never got `.data/.left/.right` — restored here.
    """

    def __init__( self , __lowercase ) -> None:
        self.data = __lowercase
        self.left = None
        self.right = None
def build_tree():
    """Interactively build a binary tree breadth-first and return its root.

    Renamed back from the mangled `lowerCamelCase__`: the __main__ section
    calls `build_tree()`. Entering "n" (or nothing) stops input and returns.
    """
    print("""\n********Press N to stop entering at any point of time********\n""" )
    check = input("""Enter the value of the root node: """ ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # The loop can only exit via the returns above; make the original bare
    # `raise` (RuntimeError with no message) explicit.
    raise RuntimeError("unreachable: input loop always returns")
def pre_order(node: TreeNode):
    """Print a pre-order (root, left, right) traversal, comma-separated.

    The mangled version did `isinstance(node, node)` (always a TypeError)
    and called an undefined name; restored to guard on TreeNode and recurse.
    """
    if not isinstance(node , TreeNode) or not node:
        return
    print(node.data , end=""",""" )
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode):
    """Print an in-order (left, root, right) traversal, comma-separated."""
    if not isinstance(node , TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data , end=""",""" )
    in_order(node.right)
def post_order(node: TreeNode):
    """Print a post-order (left, right, root) traversal, comma-separated."""
    if not isinstance(node , TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data , end=""",""" )
def level_order(node: TreeNode):
    """Print a breadth-first traversal using a FIFO queue, comma-separated."""
    if not isinstance(node , TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=""",""" )
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode):
    """Breadth-first traversal printing one tree level per output line.

    Bug fixed: the mangled loop re-queued the function argument on every
    iteration (`q.put(<root>)` — an infinite loop) instead of the children
    collected in `list_`.
    """
    if not isinstance(node , TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # drain exactly one level
            node_dequeued = q.get()
            print(node_dequeued.data , end=""",""" )
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for child in list_:
            q.put(child)
def pre_order_iter(node: TreeNode):
    """Iterative pre-order traversal using an explicit stack.

    Bug fixed: the mangled inner loop pushed the function argument instead
    of the cursor `n`, so the stack only ever held the root.
    """
    if not isinstance(node , TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=""",""" )
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode):
    """Iterative in-order traversal using an explicit stack.

    Bug fixed: the mangled inner loop pushed the function argument instead
    of the cursor `n`.
    """
    if not isinstance(node , TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data , end=""",""" )
        n = n.right
def post_order_iter(node: TreeNode):
    """Iterative post-order traversal using two stacks.

    Restored the two distinct stacks (the mangling had collapsed both
    assignment targets into one name) and push the popped cursor `n`, not
    the function argument, onto the second stack.
    """
    if not isinstance(node , TreeNode) or not node:
        return
    stack1: list[TreeNode] = []
    stack2: list[TreeNode] = []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=""",""" )
def lowerCamelCase__ ( __lowerCamelCase : str = "" , __lowerCamelCase : str=50 , __lowerCamelCase : Optional[Any]="*" ):
if not s:
return "\n" + width * char
__UpperCAmelCase , __UpperCAmelCase : Dict = divmod(width - len(__lowerCamelCase ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
    # Interactive demo: build a tree from stdin, then print it with every
    # traversal defined above.
    # NOTE(review): as the file stands this section cannot run — `build_tree`,
    # `prompt` and the traversal functions were all renamed to the single
    # identifier `lowerCamelCase__` by the mangling, and the variable below is
    # assigned to `a` (clobbering the TreeNode class, itself named `a`) while
    # the calls read `node`. Under the original names each line resolves.
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))
    a : TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")
    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")
    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")
    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
# | 63 |  (non-Python table residue from dataset concatenation; file boundary)
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
    """PyTorch-specific benchmark arguments.

    Extends the framework-agnostic base (mangled here to `lowercase__`) with
    torchscript / TPU / fp16 options and lazy device setup. Attribute and
    property names below are restored from their read-sites (e.g.
    `self.deprecated_args`, `self._setup_devices`, `self.n_gpu`).
    """

    # Legacy `no_*` flags translated to their positive counterparts in __init__.
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__( self , **kwargs ):
        """Translate deprecated `no_X` kwargs into `X`, pop the PyTorch-only
        options, then delegate to the base dataclass."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.torchscript = kwargs.pop("""torchscript""" , self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level)
        super().__init__(**kwargs)

    # Field defaults had been mangled away; False/`O1` follow the help texts.
    torchscript: bool = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    fp16_opt_level: str = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )

    @cached_property
    def _setup_devices ( self ) -> Tuple["torch.device", int]:
        # Read below as `self._setup_devices[0/1]`, hence the restored name.
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            device = torch.device("""cpu""" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu ( self ):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx ( self ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device ( self ) -> "torch.device":
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]

    @property
    def n_gpu ( self ):
        # Read by `is_gpu` below as `self.n_gpu`.
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]

    @property
    def is_gpu ( self ):
        return self.n_gpu > 0
# | 63 | 1 |  (non-Python table residue from dataset concatenation; file boundary)
from torch import nn
def lowerCamelCase__ ( __lowerCamelCase ):
    """Map an activation-function name to a fresh ``torch.nn`` module.

    Accepts ``"swish"``/``"silu"`` (aliases), ``"mish"`` and ``"gelu"``;
    raises ``ValueError`` for anything else. The mangled body compared the
    undefined name ``act_fn`` instead of the parameter — restored here.
    """
    if __lowerCamelCase in ["swish", "silu"]:
        return nn.SiLU()
    elif __lowerCamelCase == "mish":
        return nn.Mish()
    elif __lowerCamelCase == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"""Unsupported activation function: {__lowerCamelCase}""" )
# | 63 |  (non-Python table residue from dataset concatenation; file boundary)
import contextlib
import os
import sqlite3
import sqlitea

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset , expected_features):
    """Shared assertions: the fixture table must round-trip into a 4x3
    `Dataset` whose column dtypes match `expected_features`.

    Renamed back from `lowerCamelCase__`: the tests below call
    `_check_sql_dataset(...)`. The mangled signature declared both
    parameters with the same name (a SyntaxError).
    """
    assert isinstance(dataset , Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( keep_in_memory , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Reading must grow Arrow memory iff keep_in_memory=True.

    The mangled signature declared four identically named parameters (a
    SyntaxError); pytest injects fixtures by NAME, so the restored names must
    match the conftest fixtures — the last one is presumed from the datasets
    test suite, TODO confirm against conftest.py.
    """
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def lowerCamelCase__ ( features , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Reading with an explicit `features` schema (or None) must keep the
    expected dtypes. Parameter names restored as in the test above."""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset , expected_features)
def iter_sql_file(__lowerCamelCase):
    """Yield every row of the `dataset` table in the SQLite DB at this path.

    Renamed back from `lowerCamelCase__`: the to-sql tests call
    `iter_sql_file(...)`. Uses the stdlib `sqlite3` driver — the file's
    `import sqlitea` is a mangled leftover of it, not a real module.
    """
    with contextlib.closing(sqlite3.connect(__lowerCamelCase)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCamelCase__ ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Single-process dataset-to-SQL round trip: written rows equal source rows.

    The mangled signature declared three identically named parameters (a
    SyntaxError); restored names follow the body's reads.
    """
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql , expected_sql):
        assert row1 == row2
@require_sqlalchemy
def lowerCamelCase__ ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Multi-process (num_proc=2) dataset-to-SQL round trip."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql , expected_sql):
        assert row1 == row2
@require_sqlalchemy
def lowerCamelCase__ ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """num_proc=0 is invalid and must raise.

    The mangled code passed an undefined name to `pytest.raises`; datasets
    raises ValueError for a non-positive num_proc.
    """
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
# | 63 | 1 |  (non-Python table residue from dataset concatenation; file boundary)
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100: smallest blue-disc count whose arrangement has more
    than `min_total` discs in total and P(two blue) == 1/2.

    Walks the Pell-like recurrence on the (2b-1)/(2n-1) convergents; the
    numerator equals 2*total - 1, so the loop runs while total <= min_total.
    Renamed back from `lowerCamelCase__` (the __main__ guard calls
    `solution()`); the loop read `min_total`, grounding the parameter name.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
    # NOTE(review): relies on the solver above being named `solution` (it was
    # mangled to `lowerCamelCase__`); as the file stands this raises NameError.
    print(f"""{solution() = }""")
# | 63 |  (non-Python table residue from dataset concatenation; file boundary)
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]. The (misspelled) name `seive` is
# kept because is_prime() below reads it as `seive[n]`; the mangling had
# assigned both the table and the counter to `a`, breaking every read.
# NOTE(review): indices 0 and 1 stay True, so is_prime(0/1) would wrongly
# report True — the circular-prime search only probes odd n >= 3 (and 2).
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """O(1) primality lookup in the module-level `seive` table (n <= 1_000_000).

    Renamed back from `lowerCamelCase__`: `find_circular_primes` below calls
    it as `is_prime`; the mangled body also indexed with the undefined `n`.
    """
    return seive[n]
def contains_an_even_digit(number: int) -> bool:
    """Return True if the decimal representation of `number` has an even digit.

    Renamed back from `lowerCamelCase__`: the circular-prime search calls it
    by this name. Any even digit makes some rotation of a multi-digit
    candidate divisible by 2, so such numbers are rejected early.
    """
    return any(digit in """02468""" for digit in str(number))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes up to `limit` (Project Euler 35).

    A circular prime stays prime under every rotation of its digits.
    Renamed back from `lowerCamelCase__` (read by the count helper and the
    __main__ guard); the mangled body also read the undefined `limit` and
    passed the wrong name into `is_prime` inside the `all(...)`.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def lowerCamelCase__ ( ):
    # Convenience wrapper: the number of circular primes below one million
    # (expected: 55).
    # NOTE(review): relies on the search routine above being named
    # `find_circular_primes` (it was mangled to `lowerCamelCase__`); under
    # the original names this call resolves correctly.
    return len(find_circular_primes() )
if __name__ == "__main__":
    # NOTE(review): relies on `find_circular_primes` being defined under that
    # name above (it was mangled to `lowerCamelCase__`).
    print(f"""{len(find_circular_primes()) = }""")
# | 63 | 1 |  (non-Python table residue from dataset concatenation; file boundary)
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase__ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order low-pass biquad (RBJ Audio EQ Cookbook formulas).

    The mangled signature declared three identically named parameters (a
    SyntaxError) and collapsed the coefficient names; restored from the
    body's reads and the cookbook (b2 == b0 for a low-pass).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0])
    return filt
def lowerCamelCase__ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order high-pass biquad (RBJ Audio EQ Cookbook; b2 == b0)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0])
    return filt
def lowerCamelCase__ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order band-pass biquad (RBJ Audio EQ Cookbook).

    The mangled body's `-ba` read grounds b2 == -b0.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2])
    return filt
def lowerCamelCase__ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order all-pass biquad (RBJ Audio EQ Cookbook).

    For an all-pass the feed-forward coefficients are the feedback
    coefficients in reverse order, hence set_coefficients([b2,b1,b0],[b0,b1,b2]).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2])
    return filt
def lowerCamelCase__ ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a 2nd-order peaking-EQ biquad (RBJ Audio EQ Cookbook).

    gain_db is the peak gain in decibels; big_a = 10**(gain_db/40).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2])
    return filt
def lowerCamelCase__ ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a 2nd-order low-shelf biquad (RBJ Audio EQ Cookbook).

    Intermediate names (pmc/ppmc/mpc/pmpc) are read verbatim by the original
    coefficient lines; `two_sqrt_a_alpha` is the cookbook's 2*sqrt(A)*alpha.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2])
    return filt
def lowerCamelCase__ ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a 2nd-order high-shelf biquad IIR filter.

    NOTE(review): the original signature reused one obfuscated name for all
    four parameters (a SyntaxError) and the body read names that were never
    bound; both are restored here from the coefficient structure, which
    matches the standard Audio EQ Cookbook high-shelf formulas.

    :param frequency: shelf corner frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in decibels
    :param q_factor: filter quality factor (default 1/sqrt(2))
    :return: an ``IIRFilter`` of order 2 with the high-shelf coefficients set
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Cookbook intermediate terms: (A±1) ∓ (A∓1)·cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
# Emit conversion progress at INFO level.
logging.set_verbosity_info()
# NOTE(review): both module-level bindings below are named `a`, yet the
# functions in this file read `logger` and `PATTERNS`, which are never
# defined under those names — as written this raises NameError; confirm
# the intended names before running.
a : Dict = logging.get_logger(__name__)
# (ParlAI substring, Hugging Face substring) rename pairs, applied in order.
a : Tuple = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Map a ParlAI state-dict key to the Hugging Face Blenderbot name.

    Fix: the original body assigned every `k.replace(...)` result to a
    mangled throwaway name, so the replacements were discarded; the results
    are now rebound to ``k``.

    :param __lowerCamelCase: ParlAI parameter key
    :return: the corresponding HF key
    """
    k = __lowerCamelCase
    # The shared embedding gets a fixed name.
    if k == "embeddings.weight":
        return "shared.weight"
    # Apply the global substring rename table in order.
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def lowerCamelCase__ ( __lowerCamelCase : dict ):
    """Rename the four ``layernorm_embedding`` entries of a state dict to
    ``layer_norm``, in place.

    Fix: the original body bound its locals to mangled throwaway names and
    then read ``keys``/``v``/``new_k``, which were never defined; the
    bindings are restored here.

    :param __lowerCamelCase: state dict to mutate in place
    :raises KeyError: if one of the expected keys is missing
    """
    sd = __lowerCamelCase
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        # The renamed key must not collide with an existing entry.
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def lowerCamelCase__ ( checkpoint_path: str , pytorch_dump_folder_path: str , config_json_path: str ):
    """Convert a ParlAI Blenderbot checkpoint to a Hugging Face checkpoint.

    Fix: the original signature reused one obfuscated name for all three
    parameters (a SyntaxError) and every local binding was mangled so the
    body read unbound names; both are restored here.

    :param checkpoint_path: path to the ParlAI ``blenderbot-model.bin``
    :param pytorch_dump_folder_path: output directory for the HF model
    :param config_json_path: path to the Blenderbot config JSON
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            # Keep track of keys we could not map rather than failing hard.
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # NOTE(review): this mutates `sd`, not `mapping` — looks suspicious
        # (the renamed tensors never reach `mapping`); confirm intent.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast, CPU-only checks for the Kandinsky 2.2 (VQ) inpainting pipeline.

    NOTE(review): identifiers here look machine-mangled — the class and all
    class attributes are named ``a``, every method ``UpperCAmelCase`` and
    every local ``__UpperCAmelCase`` while later lines read other names
    (``model``, ``unet``, ``pipe`` ...), so the code raises NameError as
    written.  Tokens such as ``np.uinta``/``np.floataa``/
    ``UNetaDConditionModel`` suggest ``np.uint8``/``np.float32``/
    ``UNet2DConditionModel``.  Confirm against a pristine copy.
    """
    a : int = KandinskyVaaInpaintPipeline
    a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    a : Any = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    # Call arguments that are not batched inputs.
    a : Any = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    a : List[Any] = False
    @property
    def UpperCAmelCase ( self : int ) -> Dict:
        # Tiny widths keep the dummy models fast on CPU.
        return 32
    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        return 32
    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        return self.time_input_dim
    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        return self.time_input_dim * 4
    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        return 100
    @property
    def UpperCAmelCase ( self : Dict ) -> Any:
        # Deterministic weights for the dummy UNet.
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
        return model
    @property
    def UpperCAmelCase ( self : int ) -> int:
        # Keyword arguments for the dummy VQ "movq" decoder.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        torch.manual_seed(0 )
        __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        # Assemble the unet/scheduler/movq component dict for the pipeline.
        __UpperCAmelCase : List[str] = self.dummy_unet
        __UpperCAmelCase : List[str] = self.dummy_movq
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
        __UpperCAmelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
        # Seeded random embeddings, a 64x64 init image and an all-ones mask
        # with one masked pixel form the pipeline call kwargs.
        __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowercase )
        # create init_image
        __UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
        __UpperCAmelCase : List[str] = 0
        if str(__lowercase ).startswith("""mps""" ):
            # MPS has no per-device generator; fall back to the global seed.
            __UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
        else:
            __UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __UpperCAmelCase : Optional[Any] = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Smoke test: run the pipeline twice (dict and tuple return paths)
        # and compare a 3x3 output slice against reference values.
        __UpperCAmelCase : Optional[Any] = """cpu"""
        __UpperCAmelCase : Dict = self.get_dummy_components()
        __UpperCAmelCase : str = self.pipeline_class(**__lowercase )
        __UpperCAmelCase : Tuple = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
        __UpperCAmelCase : Tuple = output.images
        __UpperCAmelCase : Optional[int] = pipe(
            **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
        __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase : Optional[Any] = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Loosen the batch-consistency tolerance for this pipeline.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """GPU integration test: full Kandinsky 2.2 prior + inpaint run compared
    against a reference image.

    NOTE(review): same machine-mangled naming as the fast tests above —
    locals are bound to ``__UpperCAmelCase`` but read under other names
    (``pipe_prior``, ``pipeline`` ...); confirm against a pristine copy.
    """
    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Load the expected output and the cat input image from the hub.
        __UpperCAmelCase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __UpperCAmelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
        __UpperCAmelCase : Optional[Any] = 0
        __UpperCAmelCase : Tuple = """a hat"""
        __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __UpperCAmelCase : int = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        # Prior produces the (positive, negative) image embeddings.
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
            __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __UpperCAmelCase : Optional[int] = pipeline(
            image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __UpperCAmelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 |
def lowerCamelCase__ ( sorted_collection: list , item: int ):
    """Find ``item`` in an ascending ``sorted_collection`` by interpolation
    search.

    Fix: the original signature reused one obfuscated name for both
    parameters (a SyntaxError) and every local binding was mangled so the
    loop read unbound names (``left``, ``right``, ``point``); both are
    restored here.

    :param sorted_collection: list sorted in ascending order
    :param item: value to locate
    :return: index of ``item`` or None when absent
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            # every remaining value is equal but differs from `item`
            return None
        # Linear interpolation of the probe position from the value range.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def lowerCamelCase__ ( sorted_collection: list , item: int , left: int , right: int ):
    """Recursive interpolation search of ``item`` in ``sorted_collection``
    within the inclusive index window ``[left, right]``.

    Fix: the original signature reused one obfuscated name for all four
    parameters (a SyntaxError), the probe index was bound to a mangled
    throwaway name, and the recursive calls referenced a function name that
    is never defined in this module; all three are repaired (recursion now
    targets this function itself).

    :return: index of ``item`` or None when absent
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        return None
    # Linear interpolation of the probe position from the value range.
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    if point < left:
        return lowerCamelCase__(sorted_collection, item, point, left)
    if point > right:
        return lowerCamelCase__(sorted_collection, item, right, left)
    if sorted_collection[point] > item:
        return lowerCamelCase__(sorted_collection, item, left, point - 1)
    return lowerCamelCase__(sorted_collection, item, point + 1, right)
def lowerCamelCase__ ( __lowerCamelCase : list ):
    """Validate that ``__lowerCamelCase`` is sorted ascending.

    Fix: the original body read an unbound name (``collection``) instead of
    its parameter; it now checks the parameter it receives.

    :return: True when ascending-sorted
    :raises ValueError: when the collection is not ascending-sorted
    """
    if __lowerCamelCase != sorted(__lowerCamelCase):
        raise ValueError("""Collection must be ascending sorted""")
    return True
if __name__ == "__main__":
import sys
a : Optional[Any] = 0
if debug == 1:
a : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
a : Tuple = 67
a : List[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 63 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """Tokenizer test suite for MGP-STR (single-character vocabulary).

    NOTE(review): naming here is machine-mangled (class ``a``, every method
    ``UpperCAmelCase``, locals ``__UpperCAmelCase`` read back under other
    names); the structure matches a standard TokenizerTesterMixin subclass.
    Confirm against a pristine copy.
    """
    a : Optional[Any] = MgpstrTokenizer
    a : Any = False
    a : int = {}
    a : Tuple = False
    def UpperCAmelCase ( self : str ) -> Optional[int]:
        # Write a character-level vocab file into a temp dir for the mixin.
        super().setUp()
        # fmt: off
        __UpperCAmelCase : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        __UpperCAmelCase : List[str] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__lowercase ) + """\n""" )
    def UpperCAmelCase ( self : Any , **__lowercase : int ) -> int:
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def UpperCAmelCase ( self : int , __lowercase : str ) -> Any:
        # Input and expected output are identical for this tokenizer.
        __UpperCAmelCase : int = """tester"""
        __UpperCAmelCase : List[str] = """tester"""
        return input_text, output_text
    @unittest.skip("""MGP-STR always lower cases letters.""" )
    def UpperCAmelCase ( self : Any ) -> Optional[int]:
        pass
    def UpperCAmelCase ( self : Union[str, Any] ) -> str:
        # Added special tokens must encode to one id and be stripped on decode.
        __UpperCAmelCase : int = self.get_tokenizers(do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                __UpperCAmelCase : int = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                __UpperCAmelCase : List[Any] = tokenizer.encode([special_token] , add_special_tokens=__lowercase )
                self.assertEqual(len(__lowercase ) , 1 )
                __UpperCAmelCase : List[Any] = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
                self.assertTrue(special_token not in decoded )
    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        # Round-trip: tokenize -> ids -> tokens -> decode.
        __UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                __UpperCAmelCase , __UpperCAmelCase : List[str] = self.get_input_output_texts(__lowercase )
                __UpperCAmelCase : Tuple = tokenizer.tokenize(__lowercase )
                __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
                __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
                self.assertListEqual(__lowercase , __lowercase )
                __UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(__lowercase )
                self.assertNotEqual(len(__lowercase ) , 0 )
                __UpperCAmelCase : str = tokenizer.decode(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
                self.assertEqual(text_a.replace(""" """ , """""" ) , __lowercase )
    @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def UpperCAmelCase ( self : Optional[Any] ) -> Any:
        pass
    @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def UpperCAmelCase ( self : Dict ) -> Any:
        pass
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import the real unCLIP pipelines only when torch and a recent-enough
# transformers are installed; otherwise expose dummy objects that raise a
# helpful error when instantiated.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
    """Fourth-order linear multistep diffusion scheduler (iPNDM-style,
    https://arxiv.org/pdf/2202.09778.pdf).

    NOTE(review): identifiers are machine-mangled — attributes/locals are
    bound to ``a``/``__UpperCAmelCase`` but read under other names
    (``self.betas``, ``steps``, ``prev_sample`` ...), and ``torch.atana`` /
    ``torch.floataa`` look like corrupted ``torch.atan2`` /
    ``torch.float32``.  Confirm against a pristine copy before relying on
    the details below.
    """
    a : Dict = 1
    @register_to_config
    def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(__lowercase )
        # standard deviation of the initial noise distribution
        __UpperCAmelCase : List[Any] = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        __UpperCAmelCase : List[Any] = 4
        # running values
        __UpperCAmelCase : str = []
    def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : Union[str, torch.device] = None ) -> int:
        # Build the sine-squared beta schedule (unless trained betas are
        # supplied), the matching alphas, and the timestep grid; reset the
        # `ets` history.
        __UpperCAmelCase : int = num_inference_steps
        __UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        __UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            __UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
        else:
            __UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
        __UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
        # NOTE(review): `torch.atana` is presumably `torch.atan2` — confirm.
        __UpperCAmelCase : Tuple = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
        __UpperCAmelCase : Dict = timesteps.to(__lowercase )
        __UpperCAmelCase : Optional[Any] = []
    def UpperCAmelCase ( self : Optional[int] , __lowercase : torch.FloatTensor , __lowercase : int , __lowercase : torch.FloatTensor , __lowercase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
        # One denoising step: cache the current estimate in `ets` and apply
        # an Adams–Bashforth-style multistep update of order 1..4.
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
        __UpperCAmelCase : List[str] = (self.timesteps == timestep).nonzero().item()
        __UpperCAmelCase : Optional[Any] = timestep_index + 1
        __UpperCAmelCase : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(__lowercase )
        if len(self.ets ) == 1:
            __UpperCAmelCase : Tuple = self.ets[-1]
        elif len(self.ets ) == 2:
            __UpperCAmelCase : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            __UpperCAmelCase : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            __UpperCAmelCase : List[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        __UpperCAmelCase : Union[str, Any] = self._get_prev_sample(__lowercase , __lowercase , __lowercase , __lowercase )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__lowercase )
    def UpperCAmelCase ( self : Optional[Any] , __lowercase : torch.FloatTensor , *__lowercase : Optional[Any] , **__lowercase : Any ) -> torch.FloatTensor:
        # Input scaling is the identity for this scheduler.
        return sample
    def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict ) -> str:
        # Recover the prediction from the current sample and re-noise it at
        # the previous timestep's (alpha, sigma).
        __UpperCAmelCase : int = self.alphas[timestep_index]
        __UpperCAmelCase : Tuple = self.betas[timestep_index]
        __UpperCAmelCase : Any = self.alphas[prev_timestep_index]
        __UpperCAmelCase : List[str] = self.betas[prev_timestep_index]
        # clamp the divisor to avoid division by ~0
        __UpperCAmelCase : List[str] = (sample - sigma * ets) / max(__lowercase , 1e-8 )
        __UpperCAmelCase : List[Any] = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__( self : Tuple ) -> str:
        return self.config.num_train_timesteps
| 63 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class a ( unittest.TestCase ):
    """Unit tests for BlipProcessor (tokenizer + image-processor wiring).

    NOTE(review): naming is machine-mangled (class ``a``, methods
    ``UpperCAmelCase``, locals ``__UpperCAmelCase`` read back under other
    names such as ``processor``/``image_inputs``); structure matches a
    standard processor test suite.  Confirm against a pristine copy.
    """
    def UpperCAmelCase ( self : List[str] ) -> str:
        # Save a throwaway processor to a temp dir for round-trip tests.
        __UpperCAmelCase : List[str] = tempfile.mkdtemp()
        __UpperCAmelCase : Optional[int] = BlipImageProcessor()
        __UpperCAmelCase : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        __UpperCAmelCase : Optional[Any] = BlipProcessor(__lowercase , __lowercase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCAmelCase ( self : int , **__lowercase : Optional[Any] ) -> Any:
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).tokenizer
    def UpperCAmelCase ( self : Optional[Any] , **__lowercase : int ) -> str:
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
    def UpperCAmelCase ( self : int ) -> int:
        shutil.rmtree(self.tmpdirname )
    def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        # One random RGB image converted to a PIL fixture.
        __UpperCAmelCase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __UpperCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCAmelCase ( self : int ) -> Any:
        # Save then reload with overridden kwargs; both components survive.
        __UpperCAmelCase : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __UpperCAmelCase : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        __UpperCAmelCase : Optional[Any] = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
        __UpperCAmelCase : List[str] = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowercase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowercase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowercase )
    def UpperCAmelCase ( self : List[Any] ) -> int:
        # Image path: processor output must match the bare image processor.
        __UpperCAmelCase : List[Any] = self.get_image_processor()
        __UpperCAmelCase : Dict = self.get_tokenizer()
        __UpperCAmelCase : Optional[Any] = BlipProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __UpperCAmelCase : Dict = self.prepare_image_inputs()
        __UpperCAmelCase : str = image_processor(__lowercase , return_tensors="""np""" )
        __UpperCAmelCase : Dict = processor(images=__lowercase , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        # Text path: processor output must match the bare tokenizer.
        __UpperCAmelCase : Optional[Any] = self.get_image_processor()
        __UpperCAmelCase : Dict = self.get_tokenizer()
        __UpperCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __UpperCAmelCase : str = """lower newer"""
        __UpperCAmelCase : int = processor(text=__lowercase )
        __UpperCAmelCase : Any = tokenizer(__lowercase , return_token_type_ids=__lowercase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def UpperCAmelCase ( self : Dict ) -> Any:
        # Text + image together; empty calls must raise.
        __UpperCAmelCase : Tuple = self.get_image_processor()
        __UpperCAmelCase : Any = self.get_tokenizer()
        __UpperCAmelCase : Dict = BlipProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __UpperCAmelCase : Union[str, Any] = """lower newer"""
        __UpperCAmelCase : Tuple = self.prepare_image_inputs()
        __UpperCAmelCase : int = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def UpperCAmelCase ( self : str ) -> List[Any]:
        # batch_decode must delegate to the tokenizer.
        __UpperCAmelCase : Optional[Any] = self.get_image_processor()
        __UpperCAmelCase : List[str] = self.get_tokenizer()
        __UpperCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __UpperCAmelCase : str = processor.batch_decode(__lowercase )
        __UpperCAmelCase : Dict = tokenizer.batch_decode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        __UpperCAmelCase : int = self.get_image_processor()
        __UpperCAmelCase : List[str] = self.get_tokenizer()
        __UpperCAmelCase : Dict = BlipProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __UpperCAmelCase : List[Any] = """lower newer"""
        __UpperCAmelCase : List[str] = self.prepare_image_inputs()
        __UpperCAmelCase : Optional[Any] = processor(text=__lowercase , images=__lowercase )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
    """Entry point for the ``transformers-cli`` tool: build the argument
    parser, register every subcommand, dispatch to the selected one.

    Fix: the original body bound the parser, subparsers, parsed args and
    service to mangled throwaway names and then read ``parser``/``args``
    (NameError as written); the bindings are restored here.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand was chosen: print usage and exit with failure.
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the Graphormer sub-package: maps submodule name -> public names.
a : Any = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: register the modeling objects in the lazy structure.
    # NOTE(review): the original rebound `a` to this list (clobbering the dict)
    # and then passed an undefined `_import_structure` to _LazyModule.
    a["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a, module_spec=__spec__)
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the XGLM sub-package: maps submodule name -> public names.
# NOTE(review): the original rebound `a` to a plain list in every `else:` branch
# (clobbering the dict) and finally passed an undefined `_import_structure` to
# _LazyModule; entries are now registered as dict keys, matching the TYPE_CHECKING
# imports below.
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy module so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a)
| 63 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
# Root logger; the handler wiring below refers to it by the name `logger`,
# which the original left undefined (it bound the logger only to `a`).
logger = logging.getLogger()
a = logger  # preserve the original module-level alias
def lowerCamelCase__ ( __lowerCamelCase : Path , __lowerCamelCase_a : list ):
    """Write the strings in *__lowerCamelCase_a* to the file *__lowerCamelCase*, newline-joined.

    NOTE(review): the original declared two parameters with the same name (a
    SyntaxError), discarded the joined text, and leaked the file handle;
    `write_text` fixes all three.
    """
    Path(__lowerCamelCase ).write_text("""\n""".join(__lowerCamelCase_a ) )
# Tiny model ids used by the tests below. NOTE(review): the original bound each
# of these to the same name `a`, so T5_TINY / BART_TINY / MBART_TINY (referenced
# by the test class further down) were never defined; restored here.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
a = stream_handler  # preserve the original final binding of `a`
logging.getLogger().addHandler(stream_handler)  # attach to the root logger
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class a ( TestCasePlus ):
    """End-to-end tests for the seq2seq ``run_eval.py`` / ``run_eval_search.py`` scripts.

    NOTE(review): the base class was the undefined name `lowercase__`; restored to
    `TestCasePlus`, which is imported at the top of this file. Local names inside
    the methods are restored from their uses (the original bound everything to a
    single clobbered local).
    """

    def run_eval_tester( self , __lowercase ):
        """Run the eval script for model id *__lowercase* on a one-article input file."""
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        articles = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
        # `lowerCamelCase__` is the article-dumping helper defined above.
        lowerCamelCase__(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
        task = """translation_en_to_de""" if __lowercase == T5_TINY else """summarization"""
        testargs = f"""
            run_eval_search.py
            {__lowercase}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()
        # Patch sys.argv so run_generate() picks up the synthetic CLI args.
        with patch.object(sys , """argv""" , testargs ):
            run_generate()
        assert Path(output_file_name ).exists()
        # os.remove(Path(output_file_name))

    def test_run_eval( self ):
        # NOTE(review): the original passed an undefined name here; T5_TINY is the
        # non-slow tiny model used by the parameterized variants — confirm upstream.
        self.run_eval_tester(T5_TINY )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self , __lowercase ):
        self.run_eval_tester(__lowercase )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def test_run_eval_search( self , __lowercase ):
        """Run run_eval_search.py with a small beam/length-penalty grid and check its report."""
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        text = {
            """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
            """de""": [
                """Maschinelles Lernen ist großartig, oder?""",
                """Ich esse gerne Bananen""",
                """Morgen ist wieder ein toller Tag!""",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / """scores.json""" )
        reference_path = str(tmp_dir / """val.target""" )
        lowerCamelCase__(input_file_name , text["""en"""] )
        lowerCamelCase__(reference_path , text["""de"""] )
        task = """translation_en_to_de""" if __lowercase == T5_TINY else """summarization"""
        testargs = f"""
            run_eval_search.py
            {__lowercase}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
        with patch.object(sys , """argv""" , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [""" num_beams | length_penalty""", __lowercase, """Best score args"""]
        un_expected_strings = ["""Info"""]
        if "translation" in task:
            expected_strings.append("""bleu""" )
        else:
            # ROUGE_KEYS is imported from utils at the top of this file.
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for the GPT-SW3 sub-package.
a = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the original rebound `a` to this list (clobbering the dict)
    # and later passed an undefined `_import_structure` to _LazyModule.
    a["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): fixed to match the declared entry above (the original
        # imported `GPTSwaTokenizer` from `.tokenization_gpt_swa`, which is
        # inconsistent with the "GPTSw3Tokenizer" name it registers).
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Install the lazy proxy module so the tokenizer imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a, module_spec=__spec__)
| 63 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Module-level test fixtures. NOTE(review): the original bound every constant to
# the same name `a`, clobbering each value, so names referenced by the test class
# below (e.g. ORG_NAME) were never defined; restored here — confirm against the
# upstream Marian tokenizer test module.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# Tensor framework used for `return_tensors` in the tests.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
a = FRAMEWORK  # preserve the original final binding of `a`
@require_sentencepiece
class a ( TokenizerTesterMixin , unittest.TestCase ):
    """Unit tests for ``MarianTokenizer``.

    NOTE(review): the mixin base was the undefined name `lowercase__`; restored to
    `TokenizerTesterMixin`, imported above. The class attributes and method names
    below are presumed mixin/unittest conventions (the originals all collided on a
    single name) — confirm against the upstream test module.
    """

    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        # Minimal tokenizer config (inlined so this block is self-contained).
        save_json({"target_lang": "fi", "source_lang": "en"} , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            sample_vocab = get_tests_dir("""fixtures/test_sentencepiece.model""" )
            copyfile(sample_vocab , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(sample_vocab , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **__lowercase ) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname , **__lowercase )

    def get_input_output_texts( self , __lowercase ):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id( self ):
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 9 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def test_tokenizer_equivalence_en_de( self ):
        en_de_tokenizer = MarianTokenizer.from_pretrained("""Helsinki-NLP/opus-mt-en-de""" )
        batch = en_de_tokenizer(["""I am a small frog"""] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("""*""" )]
        self.assertIn("""source.spm""" , contents )
        MarianTokenizer.from_pretrained(save_dir )

    def test_outputs_not_longer_than_maxlen( self ):
        tok = self.get_tokenizer()
        # Inlined framework choice (module-level FRAMEWORK constant was clobbered).
        framework = "pt" if is_torch_available() else ("tf" if is_tf_available() else "jax")
        batch = tok(
            ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=True , truncation=True , return_tensors=framework )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    def test_outputs_can_be_shorter( self ):
        tok = self.get_tokenizer()
        framework = "pt" if is_torch_available() else ("tf" if is_tf_available() else "jax")
        batch_smaller = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=framework )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def test_tokenizer_integration( self ):
        # Fixture expressed with run-length literals: every row is padded with the
        # pad id 58100 to the batch length of 102, mirroring the original fmt:off
        # literal. NOTE(review): confirm pad counts against the upstream fixture.
        expected_encoding = {
            "input_ids": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],  # noqa: E501
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [58100] * 71,  # noqa: E501
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [58100] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def test_tokenizer_integration_seperate_vocabs( self ):
        tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        source_text = """Tämä on testi"""
        target_text = """This is a test"""
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(expected_src_ids , src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(expected_target_ids , target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
| 63 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
    """Return all primes up to and including *__lowerCamelCase* (sieve of Eratosthenes).

    Raises:
        ValueError: if *__lowerCamelCase* is not a positive integer.

    NOTE(review): the original body referenced `num`, `primes` and `p`, none of
    which were defined, and used the upper bound (not `p`) as the marking step.
    """
    num = __lowerCamelCase
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original bound the input to `a` but then printed
    # `prime_sieve_eratosthenes(user_num)` — both names undefined in this file.
    user_num = int(input("Enter a positive integer: ").strip())
    print(lowerCamelCase__(user_num))
| 63 | 1 |
import qiskit
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase_a : int ):
    """Simulate a single-qubit measurement and return the result histogram.

    Args:
        __lowerCamelCase: number of quantum bits in the circuit.
        __lowerCamelCase_a: number of classical bits in the circuit.

    NOTE(review): the original declared two parameters with the same name (a
    SyntaxError) and referenced undefined locals `circuit`/`simulator`.
    """
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(__lowerCamelCase , __lowerCamelCase_a )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    # NOTE(review): the original called `single_qubit_measure`, which is not
    # defined in this file — the function above is named `lowerCamelCase__`.
    print(f"""Total count for various states are: {lowerCamelCase__(1, 1)}""")
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the config classes below call `logger.warning` / `logger.info`,
# which the original left undefined (the logger was bound only to `a` and then
# immediately clobbered by the archive map).
logger = logging.get_logger(__name__)

a : Union[str, Any] = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig( PretrainedConfig ):
    """Configuration for the vision tower of a GIT model.

    NOTE(review): the class was obfuscated as `a` (and shadowed by the next class,
    making it unreachable); the name is restored from its use in the GIT config
    below (`GitVisionConfig(**...)`). The base class is `PretrainedConfig`,
    imported at the top of this file.
    """

    model_type = 'git_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs )
        # Store the hyper-parameters on the instance; the original assigned them
        # to a throwaway local, so the config never retained any value.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        """Load the vision config, unwrapping the `vision_config` key of a full GIT config."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class a ( PretrainedConfig ):
    """Configuration for a GIT model (text decoder plus `GitVisionConfig` vision tower).

    NOTE(review): base restored to `PretrainedConfig` (was the undefined name
    `lowercase__`); the attributes below were originally assigned to a throwaway
    local instead of `self`, so the config never stored its parameters.
    """

    model_type = 'git'

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict( self ):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 63 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
    """Builds tiny DPT configs/inputs and runs shape checks for the test class below.

    NOTE(review): the class name is restored from its use in the test class's
    setUp (`DPTModelTester(self)`); the original `__init__` declared every
    parameter with the same name (a SyntaxError) and never assigned to `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs( self ):
        """Return (config, pixel_values, labels) for a tiny DPT model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        """Return a tiny hybrid DPTConfig with a bottleneck BiT backbone."""
        backbone_config = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [96, 192, 384, 768],
            """num_groups""": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : List[str] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : List[str] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : Optional[Any] = False
a : Dict = False
a : Dict = False
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = DPTModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def UpperCAmelCase ( self : str ) -> Optional[int]:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def UpperCAmelCase ( self : Optional[int] ) -> str:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__lowercase )
__UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = True
if model_class in get_values(__lowercase ):
continue
__UpperCAmelCase : List[str] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : int = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__UpperCAmelCase : List[str] = model(**__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Any ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = False
__UpperCAmelCase : str = True
if model_class in get_values(__lowercase ) or not model_class.supports_gradient_checkpointing:
continue
__UpperCAmelCase : Optional[int] = model_class(__lowercase )
model.to(__lowercase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : Optional[Any] = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__UpperCAmelCase : Tuple = model(**__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Dict ) -> int:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Union[str, Any] = _config_zero_init(__lowercase )
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(config=__lowercase )
# Skip the check for the backbone
__UpperCAmelCase : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__UpperCAmelCase : Any = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        """Intentionally skipped placeholder test (model currently too large)."""
        pass
    @slow
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Slow test: every published DPT checkpoint (except the first) can be loaded.

        NOTE(review): ``from_pretrained(__lowercase )`` looks mangled — the loop
        variable ``model_name`` is presumably what should be passed here.
        """
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            __UpperCAmelCase : Tuple = DPTModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    def UpperCAmelCase ( self : int ) -> int:
        """Constructing DPTForDepthEstimation with a bad ``readout_type`` must raise.

        NOTE(review): ``__lowercase`` in ``assertRaises(...)`` and the model
        constructor is unbound — presumably the mangled exception type and
        config; confirm against the upstream test.
        """
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : str = """add"""
        with self.assertRaises(__lowercase ):
            __UpperCAmelCase : Union[str, Any] = DPTForDepthEstimation(__lowercase )
def lowerCamelCase__ ( ):
    """Load and return the COCO cats fixture image used by the slow tests.

    Fixes a NameError: the original stored the opened image in a temp
    variable but returned the undefined name ``image``.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
@slow
class a ( unittest.TestCase ):
    """Integration test: DPT hybrid-MiDaS depth estimation on a fixture image.

    NOTE(review): every ``__lowercase`` below is unbound (presumably mangled
    ``torch_device`` / intermediate-variable references); confirm upstream.
    """

    def UpperCAmelCase ( self : Optional[int] ) -> str:
        # Load processor + model, run one no-grad forward pass, then compare a
        # 3x3 slice of the predicted depth map against golden values.
        __UpperCAmelCase : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        __UpperCAmelCase : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = prepare_img()
        __UpperCAmelCase : Optional[int] = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : Tuple = model(**__lowercase )
        __UpperCAmelCase : List[Any] = outputs.predicted_depth
        # verify the predicted depth
        __UpperCAmelCase : Dict = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , __lowercase )
        __UpperCAmelCase : int = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __lowercase , atol=1e-4 ) )
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
    """Tokenizer test suite for BARThez (moussaKam/mbarthez), slow + fast variants.

    NOTE(review): several ``__lowercase`` references below are unbound
    (mangled boolean flags / token arguments); confirm against the upstream
    BarthezTokenizer test file.
    """

    # Tokenizer classes under test and capability flags (TokenizerTesterMixin).
    a : Optional[Any] = BarthezTokenizer
    a : Any = BarthezTokenizerFast
    a : Union[str, Any] = True
    a : Union[str, Any] = True

    def UpperCAmelCase ( self : Dict ) -> Any:
        # Download the fast tokenizer once and save it (both formats) into tmpdir.
        super().setUp()
        __UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
        __UpperCAmelCase : str = tokenizer

    def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # <pad> must map to id 1 and back.
        __UpperCAmelCase : Dict = """<pad>"""
        __UpperCAmelCase : Optional[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )

    def UpperCAmelCase ( self : List[Any] ) -> str:
        # Vocab sanity: special tokens at expected positions, expected size.
        __UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(__lowercase ) , 101122 )

    def UpperCAmelCase ( self : Any ) -> List[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )

    @require_torch
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # Batched encoding returns torch tensors of the expected shape/ids.
        __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
        __UpperCAmelCase : List[Any] = self.tokenizer(
            __lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        __UpperCAmelCase : int = batch.input_ids.tolist()[0]
        self.assertListEqual(__lowercase , __lowercase )

    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        # Slow and fast tokenizers must agree on tokenize/encode output.
        if not self.test_rust_tokenizer:
            return
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
        __UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
        __UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
        __UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
        __UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __UpperCAmelCase : Dict = self.get_rust_tokenizer()
        __UpperCAmelCase : str = tokenizer.encode(__lowercase )
        __UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        # Golden-encoding integration check against a pinned model revision.
        # fmt: off
        __UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        __UpperCAmelCase : int = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI entry point: convert an original ControlNet checkpoint to diffusers format.
    # Fixes NameErrors: the script referenced ``parser``, ``args``, ``controlnet``
    # and ``parse_bool`` while the assignments/def bound ``a`` / ``lowerCamelCase__``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """Parse the literal strings 'True'/'False' into booleans (argparse ``type=``)."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""" )

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 |
from __future__ import annotations
import math
def lowerCamelCase__ ( depth : int , node_index : int , is_max : bool , scores : list , height : float ):
    """Minimax over a complete binary tree whose leaves hold ``scores``.

    Fixes: all five parameters were named ``__lowerCamelCase`` (a SyntaxError)
    and the recursion called an undefined name ``minimax``; parameter names
    restored from their uses and the recursion now calls this function.

    :param depth: current depth (0 = root); must be >= 0
    :param node_index: index of the current node within its level
    :param is_max: True when the maximizing player moves at this depth
    :param scores: leaf values; must be non-empty
    :param height: depth at which the leaves live (log2(len(scores)))
    :return: optimal achievable leaf value from this node
    :raises ValueError: on negative depth or empty scores
    """
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    if is_max:
        # Maximizer picks the better of the two children (minimizer moves next).
        return max(
            lowerCamelCase__(depth + 1 , node_index * 2 , False , scores , height ) ,
            lowerCamelCase__(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        lowerCamelCase__(depth + 1 , node_index * 2 , True , scores , height ) ,
        lowerCamelCase__(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main():
    """Demo: run minimax over a fixed 8-leaf score tree and print the result.

    Fixes: the guard below called an undefined ``main`` (the def was named
    ``lowerCamelCase__``, shadowing the minimax function above), and the
    minimax call passed undefined names; arguments restored as
    (depth=0, node_index=0, is_max=True, scores, height).
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(lowerCamelCase__(0 , 0 , True , scores , height ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
a : Any = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
a : List[Any] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
a : Any = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
def remove_articles(__lowerCamelCase : List[str] ):
__UpperCAmelCase : List[str] = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(__lowerCamelCase , """ """ , __lowerCamelCase )
def white_space_fix(__lowerCamelCase : List[str] ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase : Any ):
__UpperCAmelCase : List[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def lowerCamelCase__ ( a_gold : str , a_pred : str ):
    """Return 1 if the two strings match after normalisation, else 0.

    Fixes a SyntaxError: both parameters were named ``__lowerCamelCase``.
    Relies on the ``normalize_answer`` helper defined elsewhere in this file.
    """
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def lowerCamelCase__ ( predictions : Optional[Any] , references : Any ):
    """Exact-match percentage: share of predictions equal (after normalisation)
    to at least one of their references.

    Fixes a SyntaxError (duplicate ``__lowerCamelCase`` parameters) and the
    inner call, which passed the same mangled name twice instead of
    ``(ref, pred)``.
    """
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : Dict = [rgram for rgrams in rgramslist for rgram in rgrams]
__UpperCAmelCase : Optional[int] = Counter(__lowerCamelCase )
__UpperCAmelCase : List[Any] = Counter(__lowerCamelCase )
__UpperCAmelCase : str = Counter()
for sgram, scount in sgramcounter.items():
__UpperCAmelCase : int = scount * numref
__UpperCAmelCase : Union[str, Any] = Counter(__lowerCamelCase )
__UpperCAmelCase : Tuple = Counter()
for cgram, ccount in cgramcounter.items():
__UpperCAmelCase : str = ccount * numref
# KEEP
__UpperCAmelCase : Dict = sgramcounter_rep & cgramcounter_rep
__UpperCAmelCase : str = keepgramcounter_rep & rgramcounter
__UpperCAmelCase : Optional[Any] = sgramcounter_rep & rgramcounter
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__UpperCAmelCase : int = 1
__UpperCAmelCase : Union[str, Any] = 1
if len(__lowerCamelCase ) > 0:
__UpperCAmelCase : Optional[int] = keeptmpscorea / len(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__UpperCAmelCase : Optional[int] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__UpperCAmelCase : Tuple = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__UpperCAmelCase : Optional[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__UpperCAmelCase : List[str] = sgramcounter_rep - cgramcounter_rep
__UpperCAmelCase : Union[str, Any] = delgramcounter_rep - rgramcounter
__UpperCAmelCase : Union[str, Any] = sgramcounter_rep - rgramcounter
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Dict = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__UpperCAmelCase : Union[str, Any] = 1
if len(__lowerCamelCase ) > 0:
__UpperCAmelCase : Any = deltmpscorea / len(__lowerCamelCase )
# ADDITION
__UpperCAmelCase : Optional[int] = set(__lowerCamelCase ) - set(__lowerCamelCase )
__UpperCAmelCase : List[str] = set(__lowerCamelCase ) & set(__lowerCamelCase )
__UpperCAmelCase : Tuple = set(__lowerCamelCase ) - set(__lowerCamelCase )
__UpperCAmelCase : Any = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : str = 1
if len(__lowerCamelCase ) > 0:
__UpperCAmelCase : Dict = addtmpscore / len(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
__UpperCAmelCase : str = addtmpscore / len(__lowerCamelCase )
__UpperCAmelCase : Tuple = 0
if addscore_precision > 0 or addscore_recall > 0:
__UpperCAmelCase : Union[str, Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCamelCase__ ( ssent : Optional[Any] , csent : Union[str, Any] , rsents : List[Any] ):
    """Sentence-level SARI: average keep/delete/add components over 1..4-grams.

    Fixes a SyntaxError (all three parameters named ``__lowerCamelCase``) and a
    mangled tail where every ``SARIngram`` result was unpacked into the same
    names, so only the 4-gram scores survived into the averages. Variable
    names restored from how they are used (``ssent``/``csent``/``rsents``
    per the surviving ``.split`` lines).
    """
    numref = len(rsents )
    sagrams = ssent.split(""" """ )
    cagrams = csent.split(""" """ )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        ragrams = rsent.split(""" """ )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(ragrams )
        # Build reference 2/3/4-grams from consecutive tokens.
        for i in range(0 , len(ragrams ) - 1 ):
            if i < len(ragrams ) - 1:
                r2grams.append(ragrams[i] + """ """ + ragrams[i + 1] )
            if i < len(ragrams ) - 2:
                r3grams.append(ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] )
            if i < len(ragrams ) - 3:
                r4grams.append(ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    # Source n-grams.
    for i in range(0 , len(sagrams ) - 1 ):
        if i < len(sagrams ) - 1:
            s2grams.append(sagrams[i] + """ """ + sagrams[i + 1] )
        if i < len(sagrams ) - 2:
            s3grams.append(sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] )
        if i < len(sagrams ) - 3:
            s4grams.append(sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] )
    # Candidate n-grams.
    for i in range(0 , len(cagrams ) - 1 ):
        if i < len(cagrams ) - 1:
            c2grams.append(cagrams[i] + """ """ + cagrams[i + 1] )
        if i < len(cagrams ) - 2:
            c3grams.append(cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] )
        if i < len(cagrams ) - 3:
            c4grams.append(cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] )
    (keepascore, delascore, addascore) = SARIngram(sagrams , cagrams , r1gramslist , numref )
    (keepbscore, delbscore, addbscore) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keepcscore, delcscore, addcscore) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keepdscore, deldscore, adddscore) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keepascore, keepbscore, keepcscore, keepdscore] ) / 4
    avgdelscore = sum([delascore, delbscore, delcscore, deldscore] ) / 4
    avgaddscore = sum([addascore, addbscore, addcscore, adddscore] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def lowerCamelCase__ ( sentence : Any , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    """Tokenize/normalise a sentence with a sacrebleu or moses tokenizer.

    Fixes a SyntaxError: the four parameters shared the name
    ``__lowerCamelCase``; names restored from the defaults and the branch
    conditions (``tokenizer in ["13a", "intl"]`` etc.). The moses keyword
    arguments were also unbound and are restored to ``return_str=True`` /
    ``escape=False`` — NOTE(review): confirm against the upstream wiki_split
    metric implementation.
    """
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizers under metrics.bleu.
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def lowerCamelCase__ ( sources : Any , predictions : Any , references : int ):
    """Corpus-level SARI (0-100), averaged over sentence-level SARIsent scores.

    Fixes a SyntaxError (three parameters all named ``__lowerCamelCase``) and
    the reference normalisation, which passed a mangled name instead of the
    loop variable ``sent``.

    :raises ValueError: when the three lists differ in length
    """
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError("""Sources length must match predictions and references lengths.""" )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def lowerCamelCase__ ( predictions : Any , references : List[str] , smooth_method : Optional[Any] = "exp" , smooth_value : Optional[int] = None , force : Tuple = False , lowercase : Optional[Any] = False , use_effective_order : str = False , ):
    """Corpus BLEU via sacrebleu; references are transposed to sacrebleu's
    per-reference-stream layout before scoring.

    Fixes a SyntaxError: all parameters shared the name ``__lowerCamelCase``;
    names restored from sacrebleu's ``corpus_bleu`` keyword arguments.

    :raises ValueError: when predictions have unequal reference counts
    """
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
    # Transpose [pred][ref] -> [ref_stream][pred] as corpus_bleu expects.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    """WikiSplit metric: bundles SARI, sacreBLEU and exact-match over
    (sources, predictions, references) triples."""

    def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        """Return the MetricInfo describing inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) , codebase_urls=[
                """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
                """https://github.com/cocoxu/simplification/blob/master/SARI.py""",
                """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
                """https://github.com/mjpost/sacreBLEU""",
            ] , reference_urls=[
                """https://www.aclweb.org/anthology/Q16-1029.pdf""",
                """https://github.com/mjpost/sacreBLEU""",
                """https://en.wikipedia.org/wiki/BLEU""",
                """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
            ] , )

    def UpperCAmelCase ( self : int , sources : str , predictions : Optional[int] , references : int ) -> Union[str, Any]:
        """Compute {'sari', 'sacrebleu', 'exact'} for one batch.

        Fixes a SyntaxError: the three parameters were all named
        ``__lowercase``; restored from the keyword arguments passed to the
        compute helpers below.
        """
        result = {}
        result.update({"""sari""": compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({"""exact""": compute_em(predictions=predictions , references=references )} )
        return result
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
    """Configuration class for the original OpenAI GPT model (``openai-gpt``).

    Fixes a SyntaxError: every ``__init__`` parameter was named
    ``__lowercase``; parameter names restored from the attribute assignment
    order and their upstream OpenAIGPTConfig defaults.
    """

    # Model identifier used for auto-config dispatch.
    a : Optional[Any] = 'openai-gpt'
    # Canonical-name aliases for the GPT-style short attribute names.
    a : List[Any] = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> List[Any]:
        """Store the hyper-parameters and forward remaining kwargs to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 63 | 1 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
    """Return the ``number``-th term (1-indexed) of this sequence:
    f(1)=1, f(2)=1, f(3)=2, f(4)=5, f(5)=14, ... — the (number-1)-th
    Catalan number, computed iteratively with exact integer arithmetic.

    Fixes: ``isinstance`` was called with the value itself as the second
    argument (always a TypeError) instead of ``int``, and the error-message
    f-strings referenced an undefined name ``number``.

    :raises TypeError: when the input is not an int
    :raises ValueError: when the input is < 1
    """
    if not isinstance(__lowerCamelCase , int ):
        raise TypeError(f"""Input value of [number={__lowerCamelCase}] must be an integer""" )
    if __lowerCamelCase < 1:
        raise ValueError(f"""Input value of [number={__lowerCamelCase}] must be > 0""" )
    current_number = 1
    for i in range(1 , __lowerCamelCase ):
        # Catalan recurrence: C(i) = C(i-1) * (4i - 2) / (i + 1), always exact.
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast pipeline tests for KandinskyVaaInpaintPipeline built from tiny dummy components."""

    # Pipeline class under test.
    a : int = KandinskyVaaInpaintPipeline
    # Arguments the pipeline's __call__ requires.
    a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    # Batch-able call arguments.
    a : Any = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    # Optional call arguments exercised by the common tests.
    a : Any = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    # Flag consumed by the common test mixin (presumably test_xformers / cpu-offload
    # related — NOTE(review): name was mangled, confirm which toggle this is).
    a : List[Any] = False
    @property
    def UpperCAmelCase ( self : int ) -> Dict:
        # Dummy text-embedder hidden size.
        return 32

    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        # Dummy time-embedding input dim (same value as above; later methods
        # read these via distinct property names that were mangled here).
        return 32

    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # Cross-attention dim mirrors the time input dim.
        return self.time_input_dim

    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        # Time embedding dim is 4x the input dim.
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Dummy sequence length / step budget constant used by the tests.
        return 100
    @property
    def UpperCAmelCase ( self : Dict ) -> Any:
        """Build a tiny, seeded UNet2DConditionModel for the inpaint pipeline
        (9 input channels: latents + mask + masked-image latents)."""
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
        return model
    @property
    def UpperCAmelCase ( self : int ) -> int:
        """Constructor kwargs for the tiny VQ (movq) model used as the decoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        """Build the tiny, seeded VQModel from the kwargs property above."""
        torch.manual_seed(0 )
        __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        """Assemble the pipeline components dict: dummy unet + movq and a DDIM
        scheduler configured with the Kandinsky defaults.

        NOTE(review): the boolean-ish scheduler kwargs are passed the unbound
        name ``__lowercase`` (mangled True/False flags); confirm upstream.
        """
        __UpperCAmelCase : List[str] = self.dummy_unet
        __UpperCAmelCase : List[str] = self.dummy_movq
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
        __UpperCAmelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
def UpperCAmelCase ( self : str , device : Tuple , seed : List[str]=0 ) -> Optional[Any]:
    """Build deterministic dummy inputs (init image, mask, embeddings, generator).

    NOTE(review): the mangled original declared both parameters as `__lowercase`
    (duplicate argument name — a SyntaxError) and used nonexistent numpy attributes
    `np.uinta`/`np.floataa`; restored to `device`/`seed` and `np.uint8`/`np.float32`
    per the upstream Kandinsky inpaint test.
    """
    image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
    negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
        device )
    # create init_image
    image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
    image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
    init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((256, 256) )
    # create mask
    mask = np.ones((64, 64) , dtype=np.float32 )
    # The mangled code assigned 0 to a lost target; presumably the upstream test clears
    # the top-left corner of the mask (mask[:31, :31] = 0) — TODO confirm upstream.
    mask[:31, :31] = 0
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        """image""": init_image,
        """mask_image""": mask,
        """image_embeds""": image_embeds,
        """negative_image_embeds""": negative_image_embeds,
        """generator""": generator,
        """height""": 64,
        """width""": 64,
        """num_inference_steps""": 2,
        """guidance_scale""": 4.0,
        """output_type""": """np""",
    }
    return inputs
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
    """Run the pipeline on CPU with dummy inputs and compare a slice to reference values.

    NOTE(review): the mangled original referenced the undefined name `__lowercase` for
    the components/device/disable/return_dict values (NameError at runtime); restored
    per the upstream Kandinsky test (device="cpu", disable=None, return_dict=False).
    """
    device = """cpu"""
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe = pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    output = pipe(**self.get_dummy_inputs(device ) )
    image = output.images
    # return_dict=False must yield the same image via the tuple API.
    image_from_tuple = pipe(
        **self.get_dummy_inputs(device ) , return_dict=False , )[0]
    image_slice = image[0, -3:, -3:, -1]
    image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
    print(f"""image.shape {image.shape}""" )
    assert image.shape == (1, 64, 64, 3)
    expected_slice = np.array(
        [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
    assert (
        np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
    assert (
        np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
    """Delegate to the shared batch/single-sample identity check with a relaxed tolerance."""
    tolerance = 3e-3
    super().test_inference_batch_single_identical(expected_max_diff=tolerance )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): both methods below share the mangled name `UpperCAmelCase`, so the
    # second definition shadows the first; the VRAM-cleanup body is never registered
    # under unittest's tearDown hook — confirm against the original test file.
    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Full integration test: a prior pipeline produces image embeddings, the inpaint
        # pipeline fills the masked region, and the output is compared to a stored .npy.
        # NOTE(review): assignment targets were mangled to `__UpperCAmelCase`, so names
        # read below (`pipe_prior`, `pipeline`, `mask`, `prompt`, `image_emb`, ...) and
        # the `__lowercase` device/flag references are unbound in this copy; upstream
        # uses torch_device and explicit values — confirm. `np.floataa`/`torch.floataa`
        # are presumably mangled np.float32/torch.float16.
        __UpperCAmelCase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __UpperCAmelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
        __UpperCAmelCase : Optional[Any] = 0
        __UpperCAmelCase : Tuple = """a hat"""
        __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __UpperCAmelCase : int = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # Fixed CPU generator keeps the run deterministic across devices.
        __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
            __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __UpperCAmelCase : Optional[int] = pipeline(
            image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __UpperCAmelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): all three methods below share the mangled name `UpperCAmelCase`,
    # so later definitions shadow earlier ones — confirm against the original test.
    def UpperCAmelCase ( self : str ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # Canny-edge conditioned ControlNet generation; checks an output slice against
        # hard-coded reference values.
        # NOTE(review): assignment targets were mangled to `__UpperCAmelCase` and the
        # `__lowercase` references are unbound (no such parameter); upstream binds
        # (controlnet, controlnet_params), (pipe, params), passes from_pt=True and
        # jit=True, and sets params["controlnet"] = controlnet_params — confirm.
        __UpperCAmelCase , __UpperCAmelCase : List[str] = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=__lowercase , dtype=jnp.bfloataa )
        __UpperCAmelCase , __UpperCAmelCase : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=__lowercase , from_pt=__lowercase , dtype=jnp.bfloataa )
        __UpperCAmelCase : Tuple = controlnet_params
        __UpperCAmelCase : List[str] = """bird"""
        __UpperCAmelCase : str = jax.device_count()
        __UpperCAmelCase : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
        __UpperCAmelCase : Optional[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
        __UpperCAmelCase : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
        __UpperCAmelCase : Tuple = jax.random.PRNGKey(0 )
        __UpperCAmelCase : Any = jax.random.split(__lowercase , jax.device_count() )
        # Replicate params and shard inputs across all local devices for pmapped inference.
        __UpperCAmelCase : List[str] = replicate(__lowercase )
        __UpperCAmelCase : int = shard(__lowercase )
        __UpperCAmelCase : Optional[Any] = shard(__lowercase )
        __UpperCAmelCase : Optional[int] = pipe(
            prompt_ids=__lowercase , image=__lowercase , params=__lowercase , prng_seed=__lowercase , num_inference_steps=50 , jit=__lowercase , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        __UpperCAmelCase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __UpperCAmelCase : Optional[int] = images[0, 253:256, 253:256, -1]
        __UpperCAmelCase : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __UpperCAmelCase : List[str] = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def UpperCAmelCase ( self : Optional[int] ) -> int:
        # Same flow as above, but conditioned on an OpenPose pose image.
        # NOTE(review): same unbound-name caveats as the canny test above.
        __UpperCAmelCase , __UpperCAmelCase : Dict = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=__lowercase , dtype=jnp.bfloataa )
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=__lowercase , from_pt=__lowercase , dtype=jnp.bfloataa )
        __UpperCAmelCase : Union[str, Any] = controlnet_params
        __UpperCAmelCase : Optional[int] = """Chef in the kitchen"""
        __UpperCAmelCase : Any = jax.device_count()
        __UpperCAmelCase : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
        __UpperCAmelCase : Optional[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
        __UpperCAmelCase : Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
        __UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
        __UpperCAmelCase : Tuple = jax.random.split(__lowercase , jax.device_count() )
        __UpperCAmelCase : int = replicate(__lowercase )
        __UpperCAmelCase : Optional[int] = shard(__lowercase )
        __UpperCAmelCase : str = shard(__lowercase )
        __UpperCAmelCase : List[Any] = pipe(
            prompt_ids=__lowercase , image=__lowercase , params=__lowercase , prng_seed=__lowercase , num_inference_steps=50 , jit=__lowercase , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        __UpperCAmelCase : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __UpperCAmelCase : Optional[Any] = images[0, 253:256, 253:256, -1]
        __UpperCAmelCase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __UpperCAmelCase : Any = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Optional torch support and cache-directory resolution, mirroring transformers'
# file_utils. NOTE(review): every assignment target in this mangled copy became `a`,
# but later code reads names like `_torch_available`, `torch_cache_home`, `PATH`,
# `default_cache_path`, `PYTORCH_PRETRAINED_BERT_CACHE`, `PYTORCH_TRANSFORMERS_CACHE`
# — those were presumably the original targets; confirm against the upstream utils.
try:
    import torch

    a : List[Any] = True
except ImportError:
    a : str = False
try:
    from torch.hub import _get_torch_home

    a : List[Any] = _get_torch_home()
except ImportError:
    # Fall back to $TORCH_HOME, then $XDG_CACHE_HOME/torch, then ~/.cache/torch.
    a : int = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
# Remote endpoints for model files (CDN vs. raw S3 bucket).
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory of this module; config/label files are expected to live beside it.
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
# Cache dir env-var chain, newest name last.
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
class a :
    """simple docstring"""

    # NOTE(review): several methods below declare more than one parameter named
    # `__lowercase` (duplicate argument names — a SyntaxError in this mangled copy),
    # and most assignment targets were renamed to `__UpperCAmelCase`, leaving names
    # like `name`, `level`, `dictionary`, `d`, `val`, `key`, `levels`, `last_level`,
    # `pointer` unbound. Comments describe the apparent upstream intent; confirm
    # against the original nested-Config implementation.

    # Shared mutable class attribute (presumably the default pointer dict).
    a : Dict = {}

    def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
        # Recursively wrap a nested dict: sub-dicts become child Config objects one
        # level deeper; scalar values are set via setattr so __setattr__ keeps the
        # pointer dict in sync.
        __UpperCAmelCase : List[str] = name
        __UpperCAmelCase : str = level
        __UpperCAmelCase : int = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            __UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
            __UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
            if isinstance(__lowercase , __lowercase ):
                __UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
            __UpperCAmelCase : Union[str, Any] = v
            setattr(self , __lowercase , __lowercase )
        __UpperCAmelCase : Any = d

    def __repr__( self : Optional[Any] ) -> Optional[int]:
        # Show the top-level keys only.
        return str(list((self._pointer.keys()) ) )

    def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
        # Supports dotted keys ("a.b.c"): walks/creates intermediate Configs and
        # mirrors the value into the internal pointer dict.
        __UpperCAmelCase : int = val
        __UpperCAmelCase : List[str] = val
        __UpperCAmelCase : Union[str, Any] = key.split(""".""" )
        __UpperCAmelCase : List[Any] = len(__lowercase ) - 1
        __UpperCAmelCase : List[Any] = self._pointer
        if len(__lowercase ) > 1:
            for i, l in enumerate(__lowercase ):
                if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
                    setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
                if l == last_level:
                    __UpperCAmelCase : Union[str, Any] = val
                else:
                    __UpperCAmelCase : Union[str, Any] = pointer[l]

    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        # Accessor for the raw nested dict.
        return self._pointer

    def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
        # Serialize to YAML. NOTE(review): duplicate `__lowercase` params; upstream
        # signature is (data, file_name).
        with open(f"""{file_name}""" , """w""" ) as stream:
            dump(__lowercase , __lowercase )

    def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
        # Serialize to JSON. NOTE(review): shadows the YAML dumper above because all
        # methods share the mangled name `UpperCAmelCase`.
        with open(f"""{file_name}""" , """w""" ) as stream:
            json.dump(__lowercase , __lowercase )

    @staticmethod
    def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
        # Parse a YAML config file.
        with open(__lowercase ) as stream:
            __UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
        return data

    def __str__( self : List[str] ) -> Tuple:
        # Pretty-print the config tree, indenting by nesting level.
        __UpperCAmelCase : Any = """ """
        if self._name != "root":
            __UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            __UpperCAmelCase : List[Any] = """"""
        __UpperCAmelCase : Optional[Any] = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(__lowercase , __lowercase ):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
        __UpperCAmelCase : int = level
        return r[:-1]

    @classmethod
    def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
        # Alternate constructor from a pretrained-model identifier or path.
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
        return cls(__lowercase )

    @classmethod
    def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
        # Resolve a config file (local dir/file or remote URL) and parse it as YAML,
        # honoring the standard cache/download kwargs.
        __UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
        __UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
        __UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
        __UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
        if os.path.isdir(__lowercase ):
            __UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
        elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
            __UpperCAmelCase : Tuple = pretrained_model_name_or_path
        else:
            __UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
        try:
            # Load from URL or cache if already cached
            __UpperCAmelCase : Optional[int] = cached_path(
                __lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
        except EnvironmentError:
            __UpperCAmelCase : str = """Can't load config for"""
            raise EnvironmentError(__lowercase )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
    # Compare an in-memory tensor against a tensor previously dumped to "dump.pt".
    # NOTE(review): `in_tensor`, `out_tensor` and `na` are read below but never bound
    # in this mangled copy — presumably `__lowerCamelCase` is the in-tensor and
    # "dump.pt" holds the out-tensor; confirm against the original utils.
    __UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
    __UpperCAmelCase : Tuple = in_tensor.numpy()
    __UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(na.shape , na[0, 0, :5] )
    # Assert element-wise closeness; the message reports the mismatch percentage.
    assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    # Raises unconditionally after the assert; the message suggests it is meant to
    # signal a successful comparison — confirm upstream intent.
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( model_id : str , filename : str , use_cdn : int=True ):
    """Build the remote URL for `filename` of `model_id` on the HF CDN or S3 bucket.

    NOTE(review): the mangled original declared all three parameters as
    `__lowerCamelCase` (duplicate argument names — a SyntaxError) and had the
    filename placeholder in both f-strings replaced by the literal "(unknown)";
    restored to the upstream model_id/filename/use_cdn form.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    # Models without a namespace use the legacy flat "{model_id}-{filename}" layout.
    legacy_format = """/""" not in model_id
    if legacy_format:
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( url : Optional[int] , temp_file : List[Any] , proxies : Tuple=None , resume_size : Optional[Any]=0 , user_agent : Optional[int]=None , ):
    """Stream-download `url` into `temp_file`, supporting HTTP-Range resume.

    NOTE(review): the mangled original declared every parameter as `__lowerCamelCase`
    (duplicate argument names — a SyntaxError); names restored from the keyword call
    sites in this file (proxies=/resume_size=/user_agent=) and the upstream utils.
    """
    # Advertise interpreter (and torch, when available) versions in the user agent.
    ua = """python/{}""".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("""{}/{}""".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        # Ask the server for only the remaining bytes.
        headers["""Range"""] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="""B""" , unit_scale=True , total=total , initial=resume_size , desc="""Downloading""" , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def lowerCamelCase__ ( url : List[Any] , cache_dir : Optional[int]=None , force_download : Optional[int]=False , proxies : Union[str, Any]=None , etag_timeout : str=10 , resume_download : Optional[int]=False , user_agent : Dict=None , local_files_only : List[str]=False , ):
    """Download `url` into the cache (keyed by URL+ETag) and return the local path.

    Returns the cached path, or None when offline and nothing usable is cached.
    Raises ValueError when local_files_only is set and no cached file exists.

    NOTE(review): the mangled original declared every parameter as `__lowerCamelCase`
    (duplicate argument names — a SyntaxError); names restored from the keyword call
    sites in this file and the upstream transformers file_utils.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            # A HEAD request both checks reachability and fetches the ETag.
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + """.lock"""
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + """.incomplete"""

            @contextmanager
            def _resumable_file_manager():
                # Append mode preserves previously downloaded bytes for resuming.
                with open(incomplete_path , """a+b""" ) as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        # Record provenance next to the cached file.
        meta = {"""url""": url, """etag""": etag}
        meta_path = cache_path + """.json"""
        with open(meta_path , """w""" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ):
__UpperCAmelCase : Tuple = url.encode("""utf-8""" )
__UpperCAmelCase : Optional[Any] = shaaaa(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : int = etag.encode("""utf-8""" )
__UpperCAmelCase : List[str] = shaaaa(__lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def lowerCamelCase__ ( url_or_filename : Any , cache_dir : int=None , force_download : int=False , proxies : Union[str, Any]=None , resume_download : Tuple=False , user_agent : str=None , extract_compressed_file : List[Any]=False , force_extract : Tuple=False , local_files_only : str=False , ):
    """Resolve a URL or local path to a local file path, downloading/caching and
    optionally extracting zip/tar archives.

    NOTE(review): the mangled original declared every parameter as
    `__lowerCamelCase` (duplicate argument names — a SyntaxError); names restored
    from the keyword call sites in this file and the upstream transformers utils.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace(""".""" , """-""" ) + """-extracted"""
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + """.lock"""
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , """r""" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path ) )
        return output_path_extracted
    return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = requests.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
    """Fetch an image over HTTP and return it as a numpy array."""
    resp = requests.get(__lowerCamelCase )
    picture = Image.open(BytesIO(resp.content ) )
    return np.array(picture )
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Download (if not already present in the CWD) a Detectron-style .pkl checkpoint
    from a URL and convert its "model" weights to torch tensors.

    Adds a zeroed "num_batches_tracked" entry alongside every "running_var" key so
    the state dict matches torch BatchNorm modules.

    NOTE(review): the mangled original read `fn` and `url` without ever binding them
    (assignment targets were lost); reconstructed per the upstream utils.
    """
    fn = __lowerCamelCase.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(__lowerCamelCase )
    with open(fn , """rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("""running_var""" , """num_batches_tracked""" )
            new[k2] = zero
    return new
def lowerCamelCase__ ( ):
    """Print the absolute path to the demo notebook shipped next to this package.

    NOTE(review): the mangled original referenced the undefined name
    `__lowerCamelCase` at module scope (NameError); the upstream version resolves
    relative to this module's directory, which is inlined here.
    """
    module_dir = "/".join(str(Path(__file__ ).resolve() ).split("""/""" )[:-1] )
    print(f"""{os.path.abspath(os.path.join(module_dir , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( im : Any , input_format : List[Any]="RGB" ):
    """Read an image from a local path or URL into a numpy array.

    OpenCV decodes to BGR; the array is converted to RGB and, when input_format is
    "RGB", reversed back along the channel axis (i.e. the caller receives BGR for
    "RGB"-expecting models, matching the upstream Detectron convention).

    NOTE(review): the mangled original declared both parameters as
    `__lowerCamelCase` (duplicate argument name — a SyntaxError); restored to
    im/input_format. `get_image_from_url` is defined above under the mangled name
    `lowerCamelCase__` — confirm the intended callee.
    """
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cva.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f"""could not connect to: {im}"""
    img = cva.cvtColor(img , cva.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int=1 ):
return (images[i : i + batch] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ))
| 63 | 1 |
import argparse
from collections import defaultdict
import yaml
# Path to the docs table-of-contents file that the checks below read and rewrite.
a : Any = "docs/source/en/_toctree.yml"
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : List[str] = defaultdict(__lowerCamelCase )
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Optional[int] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__lowerCamelCase )
__UpperCAmelCase : Dict = new_doc_list
__UpperCAmelCase : Optional[int] = [key for key, value in counts.items() if value > 1]
__UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
__UpperCAmelCase : Optional[Any] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__lowerCamelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
__UpperCAmelCase : int = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__lowerCamelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__lowerCamelCase )
# Sort
return overview_doc
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any]=False ):
    # Check (and with the flag set, rewrite) the "Schedulers" section of the docs ToC.
    # NOTE(review): assignment targets were mangled to `__UpperCAmelCase`, so the names
    # read below (`content`, `api_idx`, `api_doc`, `scheduler_idx`, `scheduler_doc`,
    # `new_scheduler_doc`, `diff`, `overwrite`) are never bound in this copy, and
    # `clean_doc_toc` is undefined (it is defined above as `lowerCamelCase__`).
    # The open() call presumably should use the module-level toctree path constant,
    # not the boolean parameter — confirm against the upstream check_doc_toc script.
    with open(__lowerCamelCase , encoding="""utf-8""" ) as f:
        __UpperCAmelCase : Tuple = yaml.safe_load(f.read() )
    # Get to the API doc
    __UpperCAmelCase : List[str] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    __UpperCAmelCase : Any = content[api_idx]["""sections"""]
    # Then to the model doc
    __UpperCAmelCase : Optional[int] = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    __UpperCAmelCase : str = api_doc[scheduler_idx]["""sections"""]
    # Normalize the section and detect whether it differs from what is on disk.
    __UpperCAmelCase : Dict = clean_doc_toc(__lowerCamelCase )
    __UpperCAmelCase : List[str] = False
    if new_scheduler_doc != scheduler_doc:
        __UpperCAmelCase : Optional[int] = True
        if overwrite:
            __UpperCAmelCase : int = new_scheduler_doc
    if diff:
        if overwrite:
            __UpperCAmelCase : str = api_doc
            with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(__lowerCamelCase , allow_unicode=__lowerCamelCase ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def lowerCamelCase__ ( __lowerCamelCase : List[Any]=False ):
    # Check (and with the flag set, rewrite) the "Pipelines" section of the docs ToC,
    # including each pipeline's nested sub-section.
    # NOTE(review): same unbound-name caveats as check_scheduler_doc above —
    # assignment targets were mangled to `__UpperCAmelCase`, `clean_doc_toc` is
    # undefined here, and the open() call presumably should use the module-level
    # toctree path constant rather than the boolean parameter.
    with open(__lowerCamelCase , encoding="""utf-8""" ) as f:
        __UpperCAmelCase : Union[str, Any] = yaml.safe_load(f.read() )
    # Get to the API doc
    __UpperCAmelCase : Optional[int] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    __UpperCAmelCase : Tuple = content[api_idx]["""sections"""]
    # Then to the model doc
    __UpperCAmelCase : Optional[Any] = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    __UpperCAmelCase : Tuple = False
    __UpperCAmelCase : int = api_doc[pipeline_idx]["""sections"""]
    __UpperCAmelCase : Optional[Any] = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            __UpperCAmelCase : Optional[Any] = pipeline_doc["""section"""]
            __UpperCAmelCase : List[Any] = clean_doc_toc(__lowerCamelCase )
            if overwrite:
                __UpperCAmelCase : List[str] = new_sub_pipeline_doc
        new_pipeline_docs.append(__lowerCamelCase )
    # sort overall pipeline doc
    __UpperCAmelCase : Any = clean_doc_toc(__lowerCamelCase )
    if new_pipeline_docs != pipeline_docs:
        __UpperCAmelCase : Tuple = True
        if overwrite:
            __UpperCAmelCase : Any = new_pipeline_docs
    if diff:
        if overwrite:
            __UpperCAmelCase : List[Any] = api_doc
            with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(__lowerCamelCase , allow_unicode=__lowerCamelCase ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
    # CLI entry point: --fix_and_overwrite rewrites the toctree in place instead of
    # failing when it is out of order.
    # NOTE(review): `parser`, `args`, `check_scheduler_doc` and `check_pipeline_doc`
    # are undefined in this mangled copy (targets/definitions were renamed to `a` /
    # `lowerCamelCase__`) — confirm against the upstream script.
    a : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    a : Tuple = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
    """Config-and-inputs factory used by the Flax RoBERTa model tests below.

    NOTE(review): identifiers look machine-mangled — every parameter is named
    `__lowercase` (duplicate parameter names are a SyntaxError) and locals
    are assigned to `__UpperCAmelCase` but read back under their original
    names (`parent`, `batch_size`, `config_and_inputs`, ...). Confirm
    against the original FlaxRobertaModelTester.
    """

    def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
        # Store all tester hyper-parameters on the instance.
        __UpperCAmelCase : Dict = parent
        __UpperCAmelCase : Any = batch_size
        __UpperCAmelCase : str = seq_length
        __UpperCAmelCase : Tuple = is_training
        __UpperCAmelCase : List[Any] = use_attention_mask
        __UpperCAmelCase : Dict = use_token_type_ids
        __UpperCAmelCase : Optional[int] = use_labels
        __UpperCAmelCase : Tuple = vocab_size
        __UpperCAmelCase : List[Any] = hidden_size
        __UpperCAmelCase : Optional[Any] = num_hidden_layers
        __UpperCAmelCase : Any = num_attention_heads
        __UpperCAmelCase : List[Any] = intermediate_size
        __UpperCAmelCase : Any = hidden_act
        __UpperCAmelCase : Any = hidden_dropout_prob
        __UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        __UpperCAmelCase : Dict = max_position_embeddings
        __UpperCAmelCase : List[str] = type_vocab_size
        __UpperCAmelCase : str = type_sequence_label_size
        __UpperCAmelCase : List[str] = initializer_range
        __UpperCAmelCase : Tuple = num_choices

    # Build a RobertaConfig plus random input tensors for a forward pass.
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase : Optional[Any] = None
        if self.use_attention_mask:
            __UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCAmelCase : List[Any] = None
        if self.use_token_type_ids:
            __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCAmelCase : Optional[Any] = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    # Repackage config and inputs as the dict the common test mixin expects.
    def UpperCAmelCase ( self : Tuple ) -> List[Any]:
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    # Variant that also produces encoder states/mask for decoder tests.
    def UpperCAmelCase ( self : Any ) -> List[str]:
        __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
        __UpperCAmelCase : int = True
        __UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a ( lowercase__ , unittest.TestCase ):
    """Model-test suite for the Flax RoBERTa heads.

    NOTE(review): the base class `lowercase__` (presumably
    FlaxModelTesterMixin) and the tester class referenced as
    `FlaxRobertaModelTester` have mangled names in this file — confirm.
    """

    # Whether the tested models can act as decoders.
    a : Optional[Any] = True
    # All Flax RoBERTa head classes under test (empty when Flax is missing).
    a : List[Any] = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        __UpperCAmelCase : List[str] = FlaxRobertaModelTester(self )

    # Smoke test: load every head from the hub and run a 1x1 forward pass.
    @slow
    def UpperCAmelCase ( self : str ) -> List[Any]:
        for model_class_name in self.all_model_classes:
            __UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=__lowercase )
            __UpperCAmelCase : int = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__lowercase )
| 63 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny random checkpoints keep these tests fast.
# NOTE(review): both constants were mangled to the same name `a`, so the
# second assignment shadows the first — the originals were distinct
# BART-tiny and T5-tiny model ids; confirm.
a : List[Any] = "sshleifer/bart-tiny-random"
a : Any = "patrickvonplaten/t5-tiny-random"
@require_torch
class a ( unittest.TestCase ):
    """Tests for `create_student_by_copying_alternating_layers`.

    NOTE(review): locals are assigned to the mangled `__UpperCAmelCase` but
    read back as `student`, and the cached property is referenced as
    `self.teacher_config` while its method name was mangled — confirm
    against the original test module.
    """

    # Teacher config, loaded once per test instance.
    @cached_property
    def UpperCAmelCase ( self : List[str] ) -> List[Any]:
        return AutoConfig.from_pretrained(__lowercase )

    # Same number of encoder/decoder layers requested (1/1).
    def UpperCAmelCase ( self : str ) -> List[str]:
        __UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(__lowercase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    # Decoder layer count left unspecified.
    def UpperCAmelCase ( self : Optional[Any] ) -> str:
        __UpperCAmelCase , *__UpperCAmelCase : int = create_student_by_copying_alternating_layers(__lowercase , tempfile.mkdtemp() , e=1 , d=__lowercase )

    # Unspecified decoder depth should default to the teacher's.
    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        __UpperCAmelCase , *__UpperCAmelCase : List[Any] = create_student_by_copying_alternating_layers(__lowercase , tempfile.mkdtemp() , e=1 , d=__lowercase )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    # Explicit 1-encoder / 1-decoder student.
    def UpperCAmelCase ( self : Dict ) -> str:
        __UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(__lowercase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    # Omitting both e and d must raise.
    def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
        with self.assertRaises(__lowercase ):
            create_student_by_copying_alternating_layers(__lowercase , tempfile.mkdtemp() , e=__lowercase , d=__lowercase )
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger for this file.
a : Optional[int] = logging.get_logger(__name__)
class a ( lowercase__ ):
    """Names of the supported learning-rate schedules.

    NOTE(review): downstream code in this file refers to this enum as
    `SchedulerType` with members LINEAR, COSINE, etc., and the base class
    `lowercase__` is presumably an Enum type — the names look mangled and
    every member below was renamed to `a` (so only the last assignment
    survives); confirm against the original module.
    """

    a : Tuple = 'linear'
    a : int = 'cosine'
    a : Optional[Any] = 'cosine_with_restarts'
    a : Dict = 'polynomial'
    a : Tuple = 'constant'
    a : Dict = 'constant_with_warmup'
    a : Any = 'piecewise_constant'
def lowerCamelCase__ ( optimizer: Optimizer , last_epoch: int = -1 ):
    """Create a schedule with a constant learning rate.

    Fix: the original declared both parameters with the same mangled name
    (a SyntaxError) and lost the `last_epoch` keyword that `get_scheduler`
    below passes by name; restore distinct, original parameter names.

    Args:
        optimizer: wrapped optimizer whose learning rate is scheduled.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` whose multiplier is always 1 (base LR unchanged).
    """
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    """Constant learning rate preceded by a linear warmup.

    Fix: the original signature reused one mangled name for all parameters
    (a SyntaxError), and the inner lambda read names that were never bound;
    restore `optimizer` / `num_warmup_steps` / `last_epoch` (the latter two
    are passed by keyword from `get_scheduler` below).

    Args:
        optimizer: wrapped optimizer.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing the warmup-then-constant multiplier.
    """

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to 1 during warmup.
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    """Piecewise-constant learning-rate multiplier schedule.

    `step_rules` looks like ``"1:10,20:0.1,0.01"``: each ``step:value`` pair
    applies `value` to steps strictly below `step`; the trailing bare number
    is the multiplier once every threshold has been passed.

    Fix: the original signature reused one mangled name for all parameters
    (a SyntaxError) and the body assigned locals to throwaway names while
    reading `rules_dict`, `sorted_steps`, `steps`, `last_lr_multiple` —
    bind them properly.

    Args:
        optimizer: wrapped optimizer.
        step_rules: rule string as described above.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` applying the piecewise-constant multiplier.
    """
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        step_str , multiple_str = rule_str.split(""":""" )
        steps = int(step_str )
        lr_multiple = float(multiple_str )
        rules_dict[steps] = lr_multiple
    # The final entry (no colon) is the multiplier past all thresholds.
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i , sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def lowerCamelCase__ ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup followed by linear decay to 0 at `num_training_steps`.

    Fix: the original signature reused one mangled name for all parameters
    (a SyntaxError) while the inner lambda already read the real names;
    restore `num_warmup_steps` / `num_training_steps` (passed by keyword
    from `get_scheduler` below).

    Args:
        optimizer: wrapped optimizer.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: step at which the multiplier reaches 0.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing the warmup-then-linear-decay multiplier.
    """

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Decays to 0 at num_training_steps; clamped at 0 afterwards.
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    """Linear warmup followed by a cosine decay.

    Fix: the original signature reused one mangled name for all parameters
    (a SyntaxError) while the body already read `current_step`, `progress`
    and the cycle count; restore the original names (`num_warmup_steps`,
    `num_training_steps`, `num_cycles` are keyword-passed by `get_scheduler`).

    Args:
        optimizer: wrapped optimizer.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total schedule length.
        num_cycles: number of cosine half-waves (0.5 = decay from 1 to 0).
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing warmup then cosine decay.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Fraction of the post-warmup schedule completed, in [0, 1].
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase__ ( optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    """Linear warmup followed by cosine decay with hard restarts.

    Fix: the original signature reused one mangled name for all parameters
    (a SyntaxError); restore the original names (`num_warmup_steps`,
    `num_training_steps`, `num_cycles` are keyword-passed by `get_scheduler`).

    Args:
        optimizer: wrapped optimizer.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: total schedule length.
        num_cycles: number of full cosine cycles (restarts).
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR`: each cycle decays 1 -> 0, then restarts at 1;
        the multiplier is 0 once training steps are exhausted.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # The modulo makes the cosine restart at the top of every cycle.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase__ ( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup then polynomial decay from the optimizer's base LR to `lr_end`.

    Fix: the original signature reused one mangled name for all parameters
    (a SyntaxError) while the body already read `lr_init`, `lr_end`,
    `num_warmup_steps`, `num_training_steps` and `power`; restore the
    original names (`num_warmup_steps`, `num_training_steps`, `power` are
    keyword-passed by `get_scheduler`).

    Args:
        optimizer: wrapped optimizer; its default ``lr`` is the decay start.
        num_warmup_steps: steps over which the multiplier ramps 0 -> 1.
        num_training_steps: step at which the LR reaches `lr_end`.
        lr_end: final learning rate after decay.
        power: polynomial exponent (1.0 = linear decay).
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's base LR.

    Returns:
        A `LambdaLR` implementing the warmup-then-polynomial-decay schedule.
    """
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Map each schedule type to the factory function that builds it;
# `get_scheduler` below dispatches through this table.
# NOTE(review): `SchedulerType` and the `get_*` factory names are not
# defined under these names in this (mangled) file — confirm they resolve
# to the enum and the scheduler factories defined above.
a : int = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( name: Union[str, SchedulerType] , optimizer: Optimizer , step_rules: Optional[str] = None , num_warmup_steps: Optional[int] = None , num_training_steps: Optional[int] = None , num_cycles: int = 1 , power: float = 1.0 , last_epoch: int = -1 , ):
    """Unified scheduler factory: look the schedule up by name and build it.

    Fix: the original declared all eight parameters with one mangled name
    (a SyntaxError) while the body already dispatched with the real keyword
    names; restore those names so the keyword calls below are valid.

    Args:
        name: schedule identifier (string or `SchedulerType` member).
        optimizer: optimizer to wrap.
        step_rules: rule string, used only by PIECEWISE_CONSTANT.
        num_warmup_steps: warmup length; required by all warmup schedules.
        num_training_steps: total steps; required by decaying schedules.
        num_cycles: restart count for COSINE_WITH_RESTARTS.
        power: polynomial decay exponent for POLYNOMIAL.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: when a required argument for the chosen schedule is missing.

    NOTE(review): `SchedulerType` / `TYPE_TO_SCHEDULER_FUNCTION` must resolve
    to the enum and table defined above (their names were mangled) — confirm.
    """
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 63 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    """files""" , [
        ["""full:README.md""", """dataset_infos.json"""],
        ["""empty:README.md""", """dataset_infos.json"""],
        ["""dataset_infos.json"""],
        ["""full:README.md"""],
    ] , )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
    """DatasetInfosDict must load from README.md and/or legacy dataset_infos.json.

    NOTE(review): the parameters (originally `files`, `tmp_path_factory`)
    were mangled, yet the body reads `files` / `dataset_infos_dir` /
    `dataset_infos` — confirm against the original test.
    """
    __UpperCAmelCase : str = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    __UpperCAmelCase : Optional[Any] = DatasetInfosDict.from_directory(__lowerCamelCase )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    """dataset_info""" , [
        DatasetInfo(),
        DatasetInfo(
            description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
    ] , )
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : DatasetInfo ):
    """A DatasetInfo written to a directory must reload equal to the original.

    NOTE(review): parameters (originally `tmp_path`, `dataset_info`) were
    mangled, yet the body reads `dataset_info` / `reloaded` — confirm.
    """
    __UpperCAmelCase : Optional[Any] = str(__lowerCamelCase )
    dataset_info.write_to_directory(__lowerCamelCase )
    __UpperCAmelCase : Union[str, Any] = DatasetInfo.from_directory(__lowerCamelCase )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(__lowerCamelCase , """dataset_info.json""" ) )
def lowerCamelCase__ ( ):
    """Round-trip a fully-populated DatasetInfo through its YAML dict form.

    NOTE(review): locals are assigned to the mangled `__UpperCAmelCase` but
    read back as `dataset_info` / `dataset_info_yaml_dict` / `reloaded` —
    confirm against the original test.
    """
    __UpperCAmelCase : Tuple = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    __UpperCAmelCase : Optional[Any] = dataset_info._to_yaml_dict()
    assert sorted(__lowerCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    __UpperCAmelCase : Dict = yaml.safe_dump(__lowerCamelCase )
    __UpperCAmelCase : Dict = yaml.safe_load(__lowerCamelCase )
    assert dataset_info_yaml_dict == reloaded
def lowerCamelCase__ ( ):
    """An empty DatasetInfo must serialize to an empty YAML dict.

    NOTE(review): locals are assigned to the mangled `__UpperCAmelCase` but
    read back as `dataset_info` / `dataset_info_yaml_dict` — confirm.
    """
    __UpperCAmelCase : Union[str, Any] = DatasetInfo()
    __UpperCAmelCase : Tuple = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    """dataset_infos_dict""" , [
        DatasetInfosDict(),
        DatasetInfosDict({"""default""": DatasetInfo()} ),
        DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
        DatasetInfosDict(
            {
                """default""": DatasetInfo(
                    description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                """v1""": DatasetInfo(dataset_size=42 ),
                """v2""": DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : DatasetInfosDict ):
    """A DatasetInfosDict written to a directory must reload (modulo YAML-lossy fields).

    NOTE(review): parameters (originally `tmp_path`, `dataset_infos_dict`)
    were mangled, yet the body reads `dataset_infos_dict` / `reloaded` —
    confirm against the original test.
    """
    __UpperCAmelCase : str = str(__lowerCamelCase )
    dataset_infos_dict.write_to_directory(__lowerCamelCase )
    __UpperCAmelCase : int = DatasetInfosDict.from_directory(__lowerCamelCase )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        __UpperCAmelCase : List[Any] = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        __UpperCAmelCase : Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(__lowerCamelCase , """README.md""" ) )
| 63 |
from math import pi, sqrt
def lowerCamelCase__ ( num: float ):
    """Compute the gamma function for positive integers and half-integers.

    Uses the base cases gamma(1) = 1 and gamma(1/2) = sqrt(pi) together with
    the recurrence gamma(x) = (x - 1) * gamma(x - 1).

    Fixes: the parameter was mangled while the body read `num` (NameError);
    the half-integer base case returned sqrt(num) instead of sqrt(pi); and
    the recursion called an undefined name `gamma` — recurse on this
    function itself.

    Args:
        num: a positive integer or half-integer.

    Raises:
        ValueError: if `num` <= 0.
        OverflowError: if `num` > 171.5 (result exceeds float range).
        NotImplementedError: if `num` is not an integer or half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        # Base case: gamma(1/2) = sqrt(pi).
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * lowerCamelCase__(num - 1 )
def lowerCamelCase__ ( ):
    """Sanity checks for the gamma implementation.

    Fix: the first assertion compared against `sqrt(__lowerCamelCase)` where
    `__lowerCamelCase` is undefined at module scope (NameError); gamma(1/2)
    is sqrt(pi).

    NOTE(review): `gamma` must resolve to the gamma implementation defined
    above (its name was mangled in this file) — confirm.
    """
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive demo: repeatedly prompt for a value; entering 0 (falsy)
    # exits the loop.
    # Fix: the original bound the sentinel and the prompt result to mangled
    # throwaway names and then read an undefined `num`; bind `num` itself.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        # NOTE(review): `gamma` must resolve to the gamma implementation
        # defined above (its name was mangled in this file) — confirm.
        print(f"""gamma({num}) = {gamma(num)}""")
        print("\nEnter 0 to exit...")
| 63 | 1 |
def lowerCamelCase__ ( first_term: int , common_diff: int , num_of_terms: int ):
    """Return the sum of an arithmetic series: S = n/2 * (2a + (n - 1)d).

    Fix: the original declared all three parameters with one mangled name
    (a SyntaxError) while the body read `first_term`, `common_diff` and
    `num_of_terms`; restore the intended parameter names.

    Args:
        first_term: first term `a` of the series.
        common_diff: common difference `d` between consecutive terms.
        num_of_terms: number of terms `n` to sum.

    Returns:
        The series sum as a float (the n/2 division is true division).
    """
    total: float = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def lowerCamelCase__ ( ):
    """Demo: print the sum of the series 1 + 2 + ... + 10 (expected 55.0).

    NOTE(review): `sum_of_series` must resolve to the series-sum function
    defined above (its name was mangled in this file) — confirm.
    """
    print(sum_of_series(1 , 1 , 10 ) )
# Run any doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a:
    """A binary-tree node: an integer payload plus optional child links.

    Fix: all three field names were mangled to `a`, so repeated annotations
    collapsed into a single surviving field; restore `data` / `left` /
    `right`, which is what every traversal function in this file accesses.
    (Child annotations are lazy thanks to the module's
    `from __future__ import annotations`.)
    """

    # Node payload.
    data: int
    # Left and right subtrees; None marks a missing child.
    left: Node | None = None
    right: Node | None = None
def lowerCamelCase__ ( ):
    """Build the fixed sample tree used by the demo: 1 -> (2 -> (4, 5), 3).

    Fix: the original assigned every node to a mangled throwaway name and
    then returned an undefined `tree` (NameError); build and return the tree
    through a single bound local.

    NOTE(review): `Node` must resolve to the node dataclass defined above
    (its name was mangled to `a` in this file) — confirm.
    """
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def lowerCamelCase__ ( root: Node | None ):
    """Return node values in root-left-right (pre-order) order; [] for an empty tree.

    Fix: the parameter was mangled while the body read `root` (NameError),
    and the recursion went through an undefined name `preorder`; bind the
    parameter as `root` and recurse on this function itself.
    """
    return [root.data, *lowerCamelCase__(root.left ), *lowerCamelCase__(root.right )] if root else []
def lowerCamelCase__ ( root: Node | None ):
    """Return node values in left-right-root (post-order) order; [] for an empty tree.

    Fix: the parameter was mangled while the body read `root` (NameError),
    and the recursion went through an undefined name `postorder`; bind the
    parameter as `root` and recurse on this function itself.
    """
    return lowerCamelCase__(root.left ) + lowerCamelCase__(root.right ) + [root.data] if root else []
def lowerCamelCase__ ( root: Node | None ):
    """Return node values in left-root-right (in-order) order; [] for an empty tree.

    Fix: the parameter was mangled while the body read `root` (NameError),
    and the recursion went through an undefined name `inorder`; bind the
    parameter as `root` and recurse on this function itself.
    """
    return [*lowerCamelCase__(root.left ), root.data, *lowerCamelCase__(root.right )] if root else []
def lowerCamelCase__ ( root: Node | None ):
    """Return the node count of the longest root-to-leaf path (0 for an empty tree).

    Fix: the parameter was mangled while the body read `root` (NameError),
    and the recursion went through an undefined name `height`; bind the
    parameter as `root` and recurse on this function itself.
    """
    return (max(lowerCamelCase__(root.left ) , lowerCamelCase__(root.right ) ) + 1) if root else 0
def lowerCamelCase__ ( root: Node | None ):
    """Breadth-first (level-order) traversal: values top-to-bottom, left-to-right.

    Fix: the parameter was mangled while the body read `root`, and the queue
    and dequeued node were assigned to throwaway names but read back as
    `process_queue` / `node` (NameError); bind them properly.
    """
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def lowerCamelCase__ ( root: Node | None , level: int ):
    """Return the values at depth `level` (root is level 1), left to right.

    Fix: the outer and inner signatures each reused one mangled parameter
    name (a SyntaxError for the inner def) while the bodies read `root` /
    `level`; restore distinct names.
    """
    output: list[Any] = []

    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            # Visit left before right so values come out left-to-right.
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output
def lowerCamelCase__ ( root: Node | None , level: int ):
    """Return the values at depth `level` (root is level 1), right to left.

    Fix: the outer and inner signatures each reused one mangled parameter
    name (a SyntaxError for the inner def) while the bodies read `root` /
    `level`; restore distinct names.
    """
    output: list[Any] = []

    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            # Visit right before left so values come out right-to-left.
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def lowerCamelCase__ ( root: Node | None ):
    """Zig-zag level-order traversal: odd levels left-to-right, even levels
    right-to-left. Returns one list per level.

    Fixes: the direction flag and tree height were assigned to mangled
    throwaway names but read back as `flag` (NameError), and the per-level
    helpers were called with the root where the level number belongs; pass
    the current level `h`.

    NOTE(review): `height`, `get_nodes_from_left_to_right` and
    `get_nodes_from_right_to_left` must resolve to the helpers defined above
    (their names were mangled in this file) — confirm.
    """
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def lowerCamelCase__ ( ):  # Main function for testing.
    """Build the sample tree and print every traversal implemented above.

    Fix: the tree was assigned to a mangled throwaway name while every call
    below read the undefined module-scope name `__lowerCamelCase`
    (NameError); bind the tree to a local and pass it through.

    NOTE(review): `make_tree`, `inorder`, `preorder`, `postorder`, `height`,
    `level_order`, `get_nodes_from_left_to_right` and `zigzag` must resolve
    to the functions defined above (their names were mangled) — confirm.
    """
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , """\n""" )
    print(f"""Height of Tree: {height(tree )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
# Script entry point: run doctests, then the printing demo.
# NOTE(review): `main` must resolve to the demo function defined above (its
# name was mangled in this file) — confirm.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 63 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Config-class names filtered out of the model mappings below.
a : int = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class a ( unittest.TestCase ):
    """End-to-end tests for the `text-classification` pipeline (PT and TF).

    NOTE(review): identifiers in this file look machine-mangled — the class
    attributes below are all assigned to `a` yet the guards read
    `model_mapping` / `tf_model_mapping`, and method locals assigned to
    `__UpperCAmelCase` are read back as `text_classifier` / `outputs`.
    Confirm against the original transformers test module.
    """

    a : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    a : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Drop model types whose inputs are not plain text (see _TO_SKIP above).
    if model_mapping is not None:
        a : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        a : Dict = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    # Tiny PT model: single & batched inputs, top_k, legacy return_all_scores.
    @require_torch
    def UpperCAmelCase ( self : Any ) -> int:
        __UpperCAmelCase : Any = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __UpperCAmelCase : List[str] = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
        __UpperCAmelCase : List[Any] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __UpperCAmelCase : int = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        # Legacy behavior
        __UpperCAmelCase : List[str] = text_classifier("""This is great !""" , return_all_scores=__lowercase )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __UpperCAmelCase : str = text_classifier("""This is great !""" , return_all_scores=__lowercase )
        self.assertEqual(
            nested_simplify(__lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
        __UpperCAmelCase : Optional[Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowercase )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __UpperCAmelCase : Optional[int] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowercase )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ] , )

    # Same tiny PT model, with an explicit CPU device.
    @require_torch
    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        import torch
        __UpperCAmelCase : Optional[int] = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        __UpperCAmelCase : Dict = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )

    # Tiny TF model.
    @require_tf
    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        __UpperCAmelCase : Any = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        __UpperCAmelCase : Any = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )

    # Default (full-size) PT model; slow test.
    @slow
    @require_torch
    def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        __UpperCAmelCase : Dict = pipeline("""text-classification""" )
        __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __UpperCAmelCase : Optional[Any] = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __UpperCAmelCase : Optional[Any] = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )

    # Default (full-size) TF model; slow test.
    @slow
    @require_tf
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        __UpperCAmelCase : str = pipeline("""text-classification""" , framework="""tf""" )
        __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __UpperCAmelCase : Union[str, Any] = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __UpperCAmelCase : str = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )

    # Harness hook: build the pipeline under test plus sample inputs.
    def UpperCAmelCase ( self : str , __lowercase : Optional[Any] , __lowercase : str , __lowercase : int ) -> List[str]:
        __UpperCAmelCase : Union[str, Any] = TextClassificationPipeline(model=__lowercase , tokenizer=__lowercase )
        return text_classifier, ["HuggingFace is in", "This is another test"]

    # Harness hook: run the pipeline and check structural output invariants.
    def UpperCAmelCase ( self : Any , __lowercase : Dict , __lowercase : Any ) -> Dict:
        __UpperCAmelCase : int = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __UpperCAmelCase : int = """HuggingFace is in"""
        __UpperCAmelCase : List[Any] = text_classifier(__lowercase )
        self.assertEqual(nested_simplify(__lowercase ) , [{"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        __UpperCAmelCase : List[Any] = ["""HuggingFace is in """, """Paris is in France"""]
        __UpperCAmelCase : Optional[int] = text_classifier(__lowercase )
        self.assertEqual(
            nested_simplify(__lowercase ) , [{"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )}, {"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __UpperCAmelCase : Any = text_classifier(__lowercase , top_k=__lowercase )
        __UpperCAmelCase : Tuple = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(__lowercase ) , [[{"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )}] * N, [{"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )}] * N] , )
        __UpperCAmelCase : List[str] = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        __UpperCAmelCase : List[str] = text_classifier(__lowercase )
        self.assertEqual(
            nested_simplify(__lowercase ) , {"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )} , )
        self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __UpperCAmelCase : List[Any] = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(__lowercase ):
            text_classifier(__lowercase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __UpperCAmelCase : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(__lowercase ) , [{"""label""": ANY(__lowercase ), """score""": ANY(__lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """Unit tests for ``GPTSanJapaneseTokenizer``: writes a tiny vocab/emoji table
    to a temp dir in setUp and exercises tokenize/encode/decode behaviour.

    NOTE(review): this block appears machine-mangled — every method is named
    ``UpperCAmelCase`` (so later defs shadow earlier ones), class attributes are
    all named ``a``, and many locals are assigned to ``__UpperCAmelCase`` but
    read back under their original names (``vocab_tokens``, ``tokens`` ...),
    which would raise NameError at runtime. Original identifiers must be
    restored before this suite can run; comments below flag each spot.
    """

    # NOTE(review): presumably tokenizer_class / test_rust_tokenizer /
    # from_pretrained kwargs in the upstream mixin — names mangled to ``a``.
    a : Optional[int] = GPTSanJapaneseTokenizer
    a : Optional[Any] = False
    a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}

    def UpperCAmelCase ( self : Tuple ) -> Any:
        # setUp: write a minimal vocab file and an emoji mapping file into tmpdirname.
        super().setUp()
        # fmt: off
        __UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        __UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
        __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
        # NOTE(review): the two path assignments below were presumably
        # ``self.vocab_file`` / ``self.emoji_file`` — the ``with open`` blocks
        # read exactly those attributes.
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            # NOTE(review): ``vocab_tokens`` is the mangled list assigned above.
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            # NOTE(review): ``__lowercase`` is undefined here — presumably the
            # emoji dict assigned above.
            emoji_writer.write(json.dumps(__lowercase ) )

    def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
        # Factory returning a tokenizer loaded from the temp dir.
        # NOTE(review): ``kwargs`` is undefined — the ** parameter was renamed
        # to ``__lowercase`` but the body still reads ``kwargs``.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )

    def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
        # Provide an (input, expected-normalized-output) text pair; the tokenizer
        # normalizes the variant kanji 㔺 to 世.
        __UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        __UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
        # Encode then decode a clean sequence for round-trip checks.
        __UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        return text, ids

    def UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass # TODO add if relevant

    def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass # TODO add if relevant

    def UpperCAmelCase ( self : Dict ) -> Tuple:
        pass # TODO add if relevant

    def UpperCAmelCase ( self : str ) -> Tuple:
        # Full-tokenizer test: tokenization, ids without and with special tokens.
        __UpperCAmelCase : List[str] = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
        __UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids without special tokens
        __UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids with special tokens
        # NOTE(review): ``tokens`` is undefined — presumably the token list
        # produced by tokenizer.tokenize above.
        __UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
        __UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    def UpperCAmelCase ( self : Tuple ) -> Dict:
        # <|bagoftoken|> expands to a bag of repeated "、" tokens when decoding.
        __UpperCAmelCase : int = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        __UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
        __UpperCAmelCase : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : int ) -> Optional[int]:
        # prefix_text behaviour: encoding text with an explicit prefix must decode
        # to the same string as encoding the concatenation.
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __UpperCAmelCase : List[Any] = """こんにちは、世界。"""
        __UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
        __UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
        # NOTE(review): ``prefix_text``/``input_text`` are undefined — the two
        # strings above were presumably bound to those names.
        __UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
        __UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        __UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
        __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Any = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : Any ) -> str:
        # token_type_ids must mark the prefix segment regardless of how the
        # prefix is supplied.
        __UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __UpperCAmelCase : int = """こんにちは、世界。"""
        __UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
        # NOTE(review): ``len_prefix``/``len_text`` read below are the two
        # mangled length assignments here.
        __UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
        __UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
        __UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
        __UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
        __UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        __UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
        __UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        __UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    @slow
    def UpperCAmelCase ( self : List[str] ) -> int:
        # The SEG token separates prefix from text; ids differ but decode equal.
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
        __UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        __UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token

    @slow
    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # Batch encoding with padding: __call__ and batch_encode_plus must agree.
        __UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        __UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
        # fmt: off
        __UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        __UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        __UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , __lowercase )
        self.assertListEqual(x_token.token_type_ids , __lowercase )
        self.assertListEqual(x_token.attention_mask , __lowercase )
        self.assertListEqual(x_token_a.input_ids , __lowercase )
        self.assertListEqual(x_token_a.token_type_ids , __lowercase )
        self.assertListEqual(x_token_a.attention_mask , __lowercase )

    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def UpperCAmelCase ( self : Any ) -> int:
        # tokenizer has no padding token
        pass
| 63 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( SchedulerMixin , ConfigMixin ):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.

    Restored from a mangled original: the ``step`` signature declared duplicate
    ``__lowercase`` parameters (a SyntaxError), every attribute assignment was
    replaced by a throwaway local, and ``torch.atana`` (which does not exist)
    was called instead of ``torch.atan2``. Base classes restored from this
    module's imports (SchedulerMixin, ConfigMixin).
    """

    # Standard diffusers scheduler ordering attribute.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values: history of epsilon estimates for the multistep formula
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Precompute ``betas``/``alphas``/``timesteps`` for the given number of steps."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            # NOTE(review): mangled dtype ``torch.floataa`` restored to float32.
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        # BUGFIX: the original called ``torch.atana`` which does not exist;
        # ``torch.atan2(betas, alphas)`` is the intended element-wise arctangent.
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Advance the sample one step with the (up to) fourth-order linear multistep rule.

        Raises:
            ValueError: if ``set_timesteps`` has not been called first.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""")

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Adams–Bashforth-style combination of the stored epsilon history.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """This scheduler applies no input scaling; return the sample unchanged."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Reconstruct x0 from the epsilon estimate and re-noise to the previous index."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # Guard against division by a vanishing alpha at the final step.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self) -> int:
        return self.config.num_train_timesteps
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( BenchmarkArguments ):
    """PyTorch-specific benchmark arguments.

    Restored from a mangled original: the attribute ``fpaa_opt_level`` did not
    match the ``fp16_opt_level`` field/kwarg, field defaults were replaced by
    the undefined name ``lowercase__``, and locals/properties lost their names.
    The base class is restored from this module's ``BenchmarkArguments`` import.
    """

    # Legacy negative flags accepted by __init__ and translated to positives.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs into their positive counterparts,
        pop the PyTorch-specific kwargs, then delegate to BenchmarkArguments."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                # NOTE(review): ``kwargs[positive_arg]`` raises KeyError unless the
                # positive flag was also passed — latent upstream bug, kept as-is.
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""")
        self.torchscript = kwargs.pop("""torchscript""", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""", self.fp16_opt_level)
        super().__init__(**kwargs)

    # Whether to trace models with torchscript during the benchmark.
    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    # Whether to dump XLA/TPU metrics after the run.
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    # Apex AMP optimization level used when fp16 benchmarking is enabled.
    fp16_opt_level: str = field(
        default='O1', metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        }, )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Pick the torch device (cpu / tpu / cuda) and count available GPUs once."""
        requires_backends(self, ["""torch"""])
        logger.info("""PyTorch: setting up devices""")
        if not self.cuda:
            device = torch.device("""cpu""")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        # ``self.tpu`` is presumably a BenchmarkArguments field — defined upstream.
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["""torch"""])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["""torch"""])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["""torch"""])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 63 | 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a : List[str] = "scheduler_config.json"
class a ( lowercase__ ):
    """Enumeration of flax scheduler identifiers.

    NOTE(review): this block is mangled — every member is named ``a`` so only
    the final assignment (5) survives as a class attribute, and the Enum base
    was replaced by ``lowercase__``. The original member names are not
    recoverable from this file; restore them from upstream before use.
    """
    a : Optional[int] = 1
    a : str = 2
    a : Tuple = 3
    a : Tuple = 4
    a : str = 5
@dataclass
class a ( lowercase__ ):
    """Output container returned by flax scheduler steps.

    NOTE(review): the single field name was mangled to ``a``; upstream flax
    scheduler outputs call this field ``prev_sample`` — confirm before use.
    """
    # The sample produced by the scheduler step (presumably ``prev_sample``).
    a : jnp.ndarray
class a :
    """Base mixin for flax schedulers: config-driven construction, saving, and
    compatibility discovery.

    Restored from a mangled original: tuple-unpack targets and locals had been
    replaced by throwaway names. ``_compatibles`` / ``_get_compatibles`` are
    grounded by in-class references; remaining names follow the same config
    mixin conventions and flag values are the upstream defaults — verify.
    """

    # File name under which scheduler configs are serialized.
    config_name = SCHEDULER_CONFIG_NAME
    # Config keys excluded from serialization.
    ignore_for_config = ["dtype"]
    # Names of compatible scheduler classes (extended by subclasses).
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler (and its state, when stateful) from a saved config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # Stateful flax schedulers expose create_state()/has_state.
        if hasattr(scheduler, """create_state""") and getattr(scheduler, """has_state""", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Serialize the scheduler config into ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Classes this scheduler can be swapped with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package namespace.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(""".""")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape`` by appending trailing singleton axes.

    BUGFIX: the mangled original declared both parameters as ``__lowerCamelCase``
    (a SyntaxError) while the body read ``x``; the function name is restored
    because later helpers in this file call ``broadcast_to_shape_from_left``.
    """
    assert len(shape) >= x.ndim
    # Right-pad x's shape with 1s so leading axes line up, then broadcast.
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Any=0.9_9_9 , __lowerCamelCase : List[Any]=jnp.floataa ):
def alpha_bar(__lowerCamelCase : Tuple ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__UpperCAmelCase : str = []
for i in range(__lowerCamelCase ):
__UpperCAmelCase : List[str] = i / num_diffusion_timesteps
__UpperCAmelCase : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__lowerCamelCase ) / alpha_bar(__lowerCamelCase ) , __lowerCamelCase ) )
return jnp.array(__lowerCamelCase , dtype=__lowerCamelCase )
@flax.struct.dataclass
class a :
    """Shared precomputed state (betas/alphas/cumulative products) for flax schedulers.

    Restored from a mangled original: the three fields were all named ``a`` and
    the factory's locals were unnamed, yet the ``cls(alphas=..., betas=...,
    alphas_cumprod=...)`` call and ``state.alphas_cumprod`` reads elsewhere in
    this file require exactly these field names.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Derive the beta schedule from ``scheduler.config`` and build the state.

        Raises:
            NotImplementedError: for an unknown ``beta_schedule`` value.
        """
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state: "CommonSchedulerState", original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """Return ``sqrt(alpha_cumprod[t])`` and ``sqrt(1 - alpha_cumprod[t])``
    broadcast to the shape of ``original_samples``.

    BUGFIX: the mangled original declared four parameters all named
    ``__lowerCamelCase`` (a SyntaxError) and returned names that were never
    assigned; identifiers restored to what the body reads. The function name is
    restored because the helpers below call ``get_sqrt_alpha_prod``.
    ``noise`` is accepted for signature symmetry with the callers but unused.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state: "CommonSchedulerState", original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """Forward-diffuse ``original_samples`` with ``noise`` at ``timesteps``.

    BUGFIX: duplicate ``__lowerCamelCase`` parameters (a SyntaxError) and an
    unnamed local restored; name follows the upstream convention for this
    helper — verify against external importers.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: "CommonSchedulerState", sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """Compute the v-prediction target for ``sample``/``noise`` at ``timesteps``.

    BUGFIX: duplicate ``__lowerCamelCase`` parameters (a SyntaxError) and an
    unnamed local restored; name follows the upstream convention for this
    helper — verify against external importers.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 63 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Assert the dataset read from SQL has the expected shape, columns and dtypes.

    BUGFIX: the mangled original declared both parameters as ``__lowerCamelCase``
    (a SyntaxError) while the body read ``dataset``/``expected_features``; the
    function name is restored because the tests below call ``_check_sql_dataset``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Read a SQL dataset with/without keep_in_memory and verify memory behaviour.

    BUGFIX: the mangled original declared four parameters all named
    ``__lowerCamelCase`` (a SyntaxError); pytest injects fixtures by name, so
    the fixture names read by the body (``tmp_path``, ``sqlite_path``,
    ``keep_in_memory``) are required.
    """
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""", [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ], )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Read a SQL dataset with an explicit Features mapping (or None).

    BUGFIX: duplicate ``__lowerCamelCase`` parameters (a SyntaxError) restored
    to the fixture/param names the body reads; ``Value(dtype)`` restored inside
    the dict comprehension (the mangled code passed the whole features arg).
    """
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite file at ``sqlite_path``.

    BUGFIX: the mangled original used the non-existent module ``sqlitea``
    (should be the stdlib ``sqlite3``) and referenced the parameter under two
    names; the function name is restored because the write-tests below call
    ``iter_sql_file``.
    """
    import sqlite3  # local import: the module-level ``import sqlitea`` is broken

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    """Round-trip: write the dataset back to SQL (num_proc=1) and compare rows.

    BUGFIX: three duplicate ``__lowerCamelCase`` parameters (a SyntaxError)
    restored to the fixture names the body reads (``tmp_path``, ``sqlite_path``).
    """
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for rowa, rowa_written in zip(original_sql, expected_sql):
        assert rowa == rowa_written
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    """Round-trip with num_proc=2: multiprocess write must preserve all rows.

    BUGFIX: three duplicate ``__lowerCamelCase`` parameters (a SyntaxError)
    restored to the fixture names the body reads.
    """
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for rowa, rowa_written in zip(original_sql, expected_sql):
        assert rowa == rowa_written
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    """num_proc=0 is invalid and must raise.

    BUGFIX: three duplicate ``__lowerCamelCase`` parameters (a SyntaxError)
    restored; the expected exception type was mangled away — ValueError is the
    upstream expectation for an invalid ``num_proc``, verify against datasets.
    """
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0).write()
| 63 | 1 |
def solution(n: int = 100) -> int:
    """Project Euler #6: difference between the square of the sum and the sum of
    the squares of the first ``n`` natural numbers.

    BUGFIX: the mangled original named the parameter ``__lowerCamelCase`` while
    the body read ``n``, initialized throwaway locals but accumulated into
    ``sum_of_squares``/``sum_of_ints``, and the __main__ guard called the
    undefined name ``solution`` — identifiers restored accordingly.

    >>> solution(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 63 |
from __future__ import annotations
# Sieve of Eratosthenes up to one million. BUGFIX: the mangled original
# assigned both the sieve list and the counter to ``a`` and never defined
# ``seive``/``i``, which the loop (and is_prime below) read — names restored.
# ("seive" spelling kept to match the function bodies that reference it.)
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Primality lookup in the precomputed ``seive``.

    BUGFIX: the parameter was mangled to ``__lowerCamelCase`` while the body
    read ``n``; the function name is restored because ``find_circular_primes``
    calls ``is_prime``.
    """
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    """Return True if the decimal representation of ``n`` contains an even digit.

    Name restored: ``find_circular_primes`` below calls ``contains_an_even_digit``.
    """
    return any(digit in """02468""" for digit in str(n))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below ``limit`` (every digit rotation prime).

    BUGFIX: the mangled original named the parameter ``__lowerCamelCase`` while
    the body read ``limit``, assigned throwaway locals but read
    ``str_num``/``list_nums``, and tested ``is_prime`` on the wrong value
    inside ``all`` — identifiers restored to what the logic requires. The name
    is restored because the count helper and __main__ guard call it.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        # Even digits guarantee some rotation is even, hence composite.
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def lowerCamelCase__():
    """Count how many circular primes exist below one million."""
    circular = find_circular_primes()
    return len(circular)
# Script entry point: report the count of circular primes below one million.
if __name__ == "__main__":
    print(f"""{len(find_circular_primes()) = }""")
| 63 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary tree node holding an int payload and optional children.

    BUGFIX: the mangled original named the class ``a`` and all three fields
    ``a`` (collapsing the dataclass to one field), while every traversal below
    reads ``Node``, ``.data``, ``.left`` and ``.right`` — names restored.
    """

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    """Build the sample tree  1 / (2 -> 4,5) \\ 3  used by the demo in main().

    BUGFIX: the mangled original bound five fresh Nodes to throwaway locals,
    never linked them, and returned the undefined name ``tree`` — the standard
    construction is restored; the name is restored because main() calls
    ``make_tree``.
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree.

    BUGFIX: the recursive calls referenced ``preorder`` while the mangled
    definition was named ``lowerCamelCase__`` (NameError); the name is also
    referenced by main(), so it is restored.
    """
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    """Left subtree, then right subtree, then root.

    BUGFIX: the recursive calls referenced ``postorder`` while the mangled
    definition was named ``lowerCamelCase__`` (NameError); name restored
    (also referenced by main()).
    """
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    """Left subtree, then root, then right subtree.

    BUGFIX: the recursive calls referenced ``inorder`` while the mangled
    definition was named ``lowerCamelCase__`` (NameError); name restored
    (also referenced by main()).
    """
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    """Number of levels in the tree (0 for an empty tree).

    BUGFIX: the recursive calls referenced ``height`` while the mangled
    definition was named ``lowerCamelCase__`` (NameError); name restored
    (also referenced by zigzag() and main()).
    """
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal returning node payloads level by level.

    BUGFIX: the mangled original assigned the queue and popped node to
    throwaway locals but read ``process_queue``/``node`` (NameError) —
    identifiers restored; the name is restored because main() calls it.
    """
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Payloads of all nodes at ``level`` (1-based), scanned left to right.

    BUGFIX: both the outer and the nested helper signatures declared duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError) while the bodies read
    ``root``/``level`` — names restored; the function name is restored because
    zigzag() and main() call it.
    """
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        # Descend until the requested level, collecting left child first.
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Payloads of all nodes at ``level`` (1-based), scanned right to left.

    BUGFIX: duplicate ``__lowerCamelCase`` parameters (a SyntaxError) restored
    to the ``root``/``level`` names the bodies read; the function name is
    restored because zigzag() calls it.
    """
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        # Mirror of the left-to-right variant: visit the right child first.
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zig-zag (spiral) level-order traversal: alternate scan direction per level.

    BUGFIX: the mangled original assigned the direction flag and tree height to
    throwaway locals but read ``flag``/``height_tree`` (NameError) — names
    restored; the function name is restored because main() calls it.
    """
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0  # 0 -> scan left-to-right next, 1 -> right-to-left next
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    """Build the sample tree and print every traversal.

    BUGFIX: the mangled original bound the tree to a throwaway local but every
    print read ``__lowerCamelCase`` (undefined, the function takes no
    parameters) — a ``root`` local is restored; the name is restored because
    the __main__ guard calls ``main()``.
    """
    root = make_tree()
    print(f"""In-order Traversal: {inorder(root)}""")
    print(f"""Pre-order Traversal: {preorder(root)}""")
    print(f"""Post-order Traversal: {postorder(root)}""", """\n""")

    print(f"""Height of Tree: {height(root)}""", """\n""")

    print("""Complete Level Order Traversal: """)
    print(level_order(root), """\n""")

    print("""Level-wise order Traversal: """)
    for level in range(1, height(root) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(root, level=level))

    print("""\nZigZag order Traversal: """)
    print(zigzag(root))
# Run the module's doctests, then the interactive demo.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
# (parlai substring, hugging face substring) replacements applied to every
# checkpoint key. BUGFIX: name mangled to ``a``; ``rename_state_dict_key``
# below iterates ``PATTERNS``, so that name is required.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Translate a ParlAI state-dict key into the HF Blenderbot naming scheme.

    BUGFIX: the mangled original named the parameter ``__lowerCamelCase`` while
    every comparison/replace read ``k``, and each ``replace`` result was bound
    to a throwaway local — the conventional ``k = k.replace(...)`` chain is
    restored; the name is restored because the converter below calls it.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd):
    """In-place rename of the four ``layernorm_embedding`` entries to ``layer_norm``.

    BUGFIX: the mangled original assigned the key list and popped values to
    throwaway locals but iterated ``keys`` and stored ``v`` (NameError) —
    identifiers restored; the name is restored because the converter calls it.
    """
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into a HF checkpoint on disk.

    BUGFIX: the mangled original assigned every intermediate to a throwaway
    local but read ``model``/``sd``/``cfg``/``m``/``valid_keys``/``failures``/
    ``mapping``/``new_k`` (NameError); identifiers restored. The ``strict``
    flag value was also mangled away — ``strict=True`` matches the upstream
    conversion script, verify before shipping. The name is restored because
    the __main__ guard calls it.
    """
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> list of public names it provides.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration symbols are exposed.
    pass
else:
    # Torch present: expose the modeling classes as well.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so that heavy imports
    # only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending-sorted collection by interpolation.

    Args:
        sorted_collection: ascending-sorted sequence of comparable numbers.
        item: value to locate.

    Returns:
        An index of `item`, or None if it is not present.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Estimate the position assuming values are uniformly distributed.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over `sorted_collection[left:right+1]`.

    Args:
        sorted_collection: ascending-sorted sequence of comparable numbers.
        item: value to locate.
        left: inclusive lower bound of the search window.
        right: inclusive upper bound of the search window.

    Returns:
        An index of `item`, or None if it is not present.
    """
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # Estimate undershot the window: clamp and retry.
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        # Estimate overshot the window: clamp and retry.
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError unless `collection` is ascending-sorted; return True otherwise."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
a : Optional[Any] = 0
if debug == 1:
a : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
a : Tuple = 67
a : List[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 63 | 1 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size 2-D Gaussian kernel centered on the middle cell.

    Args:
        k_size: odd kernel side length.
        sigma: standard deviation of the Gaussian.

    Returns:
        2-D numpy array of kernel weights (not normalized to sum to 1).
    """
    center = k_size // 2
    # Coordinate grids measured from the kernel center.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur to a 2-D grayscale image via im2col + dot product.

    Args:
        image: 2-D numpy array of pixel values.
        k_size: side length of the Gaussian kernel.
        sigma: standard deviation of the Gaussian.

    Returns:
        Filtered image of shape (H - k_size + 1, W - k_size + 1), dtype uint8
        (valid-mode convolution: no padding at the borders).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # dot-product each window with the kernel, then reshape back to an image.
    # "uint8" replaces the mangled `uinta` import (numpy has no such name).
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
# read original image
a : Tuple = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
a : Tuple = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
a : Tuple = gaussian_filter(gray, 3, sigma=1)
a : Any = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import the real UnCLIP pipelines only when the runtime meets the minimum
# requirements; otherwise fall back to dummy placeholder objects that raise a
# helpful error message when instantiated.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing/outdated dependencies: expose dummy stand-ins under the same names.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies satisfied: expose the actual implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Detect torch availability once at import time (checked later in http_get).
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

# Locate the torch cache home, falling back to the XDG convention.
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

# Model-hub endpoints (legacy S3/CloudFront layout).
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"

# Directory containing this file, plus the bundled label/config resources.
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")

# Cache directory resolution, honoring the historical env-var chain.
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)

WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Load Visual Genome class and attribute names from CSV-ish text files.

    Each line is expected to start with the label name, optionally followed by
    comma-separated aliases; only the first field is kept, lowercased.

    Args:
        objs: path to the objects file (one label per line).
        attrs: path to the attributes file (one label per line).

    Returns:
        (vg_classes, vg_attrs): two lists of lowercase label strings.
    """
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """Load a pickled detectron-style checkpoint into an OrderedDict of tensors.

    Args:
        ckp_path: path to a pickle file containing {"model": {name: array/tensor}}.

    Returns:
        OrderedDict mapping parameter names to torch tensors.
    """
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # torch.Tensor, not torch.tensor: the latter is a factory function
            # and isinstance() against it raises TypeError.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """Hierarchical, attribute-accessible configuration.

    Nested dicts become nested Config objects; keys are also settable with
    dotted paths (e.g. ``cfg.model.dim`` or ``setattr(cfg, "model.dim", 4)``).
    """

    # Default pointer for instances before __init__ populates it.
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                # Recurse: nested dict becomes a child Config one level deeper.
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        # Store under both the full (possibly dotted) key and its last segment.
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            # Walk the dotted path, mirroring the assignment into children.
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        """Return the underlying mapping (children remain Config objects)."""
        return self._pointer

    def dump_yaml(self, data, file_name):
        """Serialize `data` to `file_name` as YAML."""
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        """Serialize `data` to `file_name` as JSON."""
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        """Parse the YAML file at `config` and return the resulting object."""
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            r = """"""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(v).__name__})\n"""
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        """Build a Config from a local path/file or a remote model id."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        """Resolve, download if needed, and parse the YAML config.

        Returns the parsed config dict plus the remaining kwargs.
        """
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            # NOTE(review): use_cdn=False mirrors the upstream utils; confirm.
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """Compare `in_tensor` against the reference tensor pickled in dump.pt.

    Prints both arrays' heads, asserts element-wise closeness, then raises to
    halt execution with a success message (debug helper, not production code).
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    # Reference was saved with a leading batch dimension; take the first item.
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.0_1, atol=0.1), (
        f"""{sum([1 for x in np.isclose(n1, n2, rtol=0.0_1, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"""
        " element-wise mismatch"
    )
    # Deliberate raise: used to stop a debugging run once tensors match.
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """Return True when the string parses as an http(s) URL."""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    """Build the download URL for `filename` of `model_id` on the HF bucket.

    Args:
        model_id: model identifier; no "/" means the legacy flat namespace.
        filename: file to fetch (e.g. config.yaml).
        use_cdn: pick the CloudFront endpoint instead of raw S3.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        # Legacy flat layout: files are stored as "<model_id>-<filename>".
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream `url` into the open binary file object `temp_file`.

    Args:
        url: file URL to download.
        temp_file: writable binary file object receiving the content.
        proxies: optional requests proxies mapping.
        resume_size: byte offset to resume from (sends a Range header).
        user_agent: extra user-agent info (dict or str) appended to the default.
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Download `url` into the cache (keyed by URL + ETag) and return its path.

    Returns the cached file path, or None when offline with no cached copy
    (unless local_files_only, which raises instead).
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    # Probe the server for an ETag so distinct revisions get distinct cache entries.
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                # Append mode so an interrupted download continues where it stopped.
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name,
            )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        # Record provenance next to the cached blob.
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    """Derive a deterministic cache filename from a URL and optional ETag.

    The name is sha256(url), plus ".{sha256(etag)}" when an ETag is supplied,
    plus a ".h5" suffix preserved from the URL so downstream loaders can
    detect HDF5 files by extension.
    """
    # Local import: the module-level hashlib import in this file is mangled
    # (`shaaaa`), so bring in sha256 here to keep the function self-contained.
    from hashlib import sha256

    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve a URL or local path to a concrete local file (or extracted dir).

    Remote URLs are fetched through get_from_cache; local paths are returned
    as-is. With extract_compressed_file, zip/tar archives are unpacked next to
    the cached file and the extraction directory is returned.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    """Load data from a local file path or a URL.

    Tries, in order: literal-eval of a local file's content, JSON from an HTTP
    response, literal-eval of the response body, then splitting it into lines.

    SECURITY NOTE(review): this uses eval() on file/remote content — do not
    call it on untrusted input.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # Fixed: was `requests.json()` — the requests *module* has no
            # .json(); the response object does.
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """Fetch an image over HTTP and return it as a numpy array (via PIL)."""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """Download (if needed) a pickled FRCNN checkpoint and convert to tensors.

    BatchNorm `running_var` entries also get a zeroed `num_batches_tracked`
    companion key, which torch's BatchNorm state dicts expect.

    Args:
        url: checkpoint URL; the basename is reused as the local filename.

    Returns:
        dict mapping parameter names to torch tensors.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            new_k = k.replace("running_var", "num_batches_tracked")
            new[new_k] = zero
    return new
def get_demo_path():
    """Print the absolute path of the demo notebook next to this package."""
    # PATH is this file's directory; the notebook lives one level up.
    print(f"""{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb""")
def img_tensorize(im, input_format="RGB"):
    """Load an image from a path or URL into a numpy array.

    Args:
        im: local file path or HTTP(S) URL.
        input_format: "RGB" keeps BGR channel order after the cvtColor
            round-trip via the ::-1 flip; anything else yields RGB.

    Returns:
        HxWx3 numpy array.
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        # NOTE(review): `cva` appears to be a mangled `cv2` import at the top
        # of this file; kept as-is to match the module's scope.
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"""could not connect to: {im}"""
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """Yield successive slices of `images` of length `batch` (last may be shorter)."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Improved Pseudo Linear Multistep (IPNDM) scheduler.

    A fourth-order multistep scheduler: it keeps a running list of the last
    predictions (`ets`) and combines them with Adams–Bashforth-style
    coefficients. See https://arxiv.org/pdf/2202.09778.pdf, formulas (9),
    (12), (13) and Algorithm 2.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Set the discrete timestep/beta/alpha schedules for inference."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            # torch.float32 replaces the mangled `floataa` dtype name.
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        # torch.atan2 replaces the nonexistent `torch.atana`.
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Advance the diffusion state one step using the multistep update."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Multistep combination: order increases with the number of stored values.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; identity."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Compute the previous sample from the current one and the ets combination."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # max(alpha, 1e-8) guards against division by a vanishing alpha.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """Pytest hook: register the shared diffusers command-line options.

    The hook must be named exactly `pytest_addoption` for pytest to discover it.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def lowerCamelCase__ ( __lowerCamelCase : int ):
    """Emit diffusers' per-test report files at the end of the pytest session.

    `__lowerCamelCase` is pytest's terminal reporter; reports are only produced
    when the session was started with `--make-reports=<id>`.

    Fix: the dump referenced the undefined names `terminalreporter` and
    `make_reports`; both are restored from the function's only parameter and
    its only local.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = __lowerCamelCase.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(__lowerCamelCase , id=make_reports )
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point of the `transformers-cli` tool.

    Builds the argument parser, registers every sub-command, dispatches to the
    selected command's factory (`args.func`) and runs the resulting service.

    Fix: the anonymized dump referenced the undefined name `__lowerCamelCase`
    everywhere and the `__main__` guard called the undefined `main()`; the
    canonical local names (`parser`, `commands_parser`, `args`, `service`) and
    the function name are restored.
    """
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 63 | 1 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the dump assigned both the logger and the archive map to the same
# name `a`; the classes below call `logger.warning`, so that name is restored.
logger = logging.get_logger(__name__)
a : Dict = logger
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
a : int = OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder.

    NOTE(review): the dump named this class `a`, derived it from the undefined
    name `lowercase__`, collapsed every `__init__` parameter to the duplicate
    name `__lowercase` (a SyntaxError) and dropped the `self.` prefix on the
    attribute assignments. Names are restored from the defaults and from the
    attributes the sibling composite config reads — confirm against upstream.
    """

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config; unwraps the `text_config` key when given a full OwlViT config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OWL-ViT vision encoder.

    NOTE(review): reconstructed from an anonymized dump (duplicate `__lowercase`
    parameters, missing `self.` prefixes, undefined base class); parameter
    names are inferred from the defaults and the sibling text config — confirm
    against the upstream file.
    """

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config; unwraps the `vision_config` key when given a full OwlViT config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    """Composite OWL-ViT configuration holding a text and a vision sub-config.

    NOTE(review): reconstructed from an anonymized dump; the body referenced
    `OwlViTTextConfig`/`OwlViTVisionConfig`, which pins the intended class
    names — confirm against the upstream file.
    """

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6_592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build an OwlViTConfig from separate text/vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OWL-ViT (text + image inputs).

    Fix: in the anonymized dump all five members were named `UpperCAmelCase`,
    so the later properties shadowed the earlier ones; the canonical
    `OnnxConfig` member names are restored.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # tolerance used when validating the exported model against the original
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Combine dummy text inputs (tokenizer) and dummy image inputs (image processor)."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a : Optional[int] = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
a : Any = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys in place to the `transformers` naming scheme.

    Fix: the dump defined this as `lowerCamelCase__` while the converter below
    calls `convert_state_dict`, and collapsed `name`/`weight` into one local;
    both are restored.

    Returns the same dict object with the renamed keys.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("""emb.""" ):
            name = name.replace("""emb.""" , """embeddings.""" )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("""blocks.0.ln0""" ):
            name = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
        # att -> attention
        name = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , name)
        # ffn -> feed_forward
        name = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , name)
        # time_mix_k -> time_mix_key
        if name.endswith(""".time_mix_k""" ):
            name = name.replace(""".time_mix_k""" , """.time_mix_key""" )
        # time_mix_v -> time_mix_value
        if name.endswith(""".time_mix_v""" ):
            name = name.replace(""".time_mix_v""" , """.time_mix_value""" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(""".time_mix_r""" ):
            name = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
        # everything except the LM head lives under the `rwkv.` prefix
        if name != "head.weight":
            name = """rwkv.""" + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub and convert it to `transformers` format.

    Fix: the dump collapsed every local to `__UpperCAmelCase` and named the
    function `lowerCamelCase__` although the `__main__` guard calls
    `convert_rmkv_checkpoint_to_hf_format`; canonical names are restored.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="""cpu""" )
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, """w""" , encoding="""utf-8""" ) as f:
            content = json.dumps(index, indent=2, sort_keys=True) + """\n"""
            f.write(content)
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        # re-save each shard with CPU-resident, contiguous tensors
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="""2GB""" )
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
a : Tuple = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazily-imported module structure for GPT-SW3.
# Fixes: the dump clobbered the import-structure dict (assigning the tokenizer
# list straight to `a`), passed the undefined `_import_structure` to
# `_LazyModule`, and imported the misspelled `tokenization_gpt_swa` /
# `GPTSwaTokenizer` instead of the declared `GPTSw3Tokenizer`.
a : str = {}
_import_structure = a
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
def circle_sort(collection: list) -> list:
    """Sort `collection` in place with circle sort and return it.

    Circle sort repeatedly compares/swap elements mirrored around the centre of
    each sub-range, recursing into both halves, until a full pass performs no
    swap.

    Fixes vs. the anonymized dump: the element swaps assigned both values to a
    single throwaway local (so nothing was ever swapped), `swapped` was never
    bound (NameError on the `low == high` early return), and the function name
    did not match the `circle_sort(...)` call in the __main__ guard.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circle pass over collection[low:high+1]; True if any swap occurred."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # odd-sized range: the middle element is compared with its right neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
a : Optional[int] = input("Enter numbers separated by a comma:\n").strip()
a : List[str] = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 63 |
def prime_sieve_eratosthenes(num: int) -> list:
    """Return all primes <= `num` using the sieve of Eratosthenes.

    Raises ValueError for num <= 0.

    Fixes vs. the anonymized dump: the inner range stepped by the function
    argument instead of the prime `p`, composite marking assigned a throwaway
    local instead of `primes[i] = False`, and the function name did not match
    the `prime_sieve_eratosthenes(...)` call in the __main__ guard.
    """
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting at p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
a : Optional[Any] = random.Random()
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None ):
if rng is None:
__UpperCAmelCase : int = global_rng
__UpperCAmelCase : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class a ( unittest.TestCase ):
    """Helper that builds Wav2Vec2 feature-extractor kwargs and synthetic speech inputs.

    NOTE(review): this anonymized dump collapsed every parameter to
    `__lowercase` (duplicate arguments — a SyntaxError as-is) and every local
    to `__UpperCAmelCase`; names such as `parent`, `batch_size`, `equal_length`
    referenced below were the originals — confirm against the upstream
    Wav2Vec2 feature-extraction test file.
    """

    def __init__( self : str , __lowercase : List[Any] , __lowercase : Dict=7 , __lowercase : Tuple=400 , __lowercase : Optional[int]=2000 , __lowercase : List[Any]=1 , __lowercase : List[str]=0.0 , __lowercase : str=16000 , __lowercase : Tuple=True , __lowercase : List[str]=True , ) -> Dict:
        # Store the test parameters; the names on the right-hand side are the
        # un-anonymized parameter names this body expects.
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : str = batch_size
        __UpperCAmelCase : Union[str, Any] = min_seq_length
        __UpperCAmelCase : Any = max_seq_length
        # length step between consecutive batch entries (inputs increase in size)
        __UpperCAmelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __UpperCAmelCase : List[Any] = feature_size
        __UpperCAmelCase : Optional[Any] = padding_value
        __UpperCAmelCase : Tuple = sampling_rate
        __UpperCAmelCase : Optional[Any] = return_attention_mask
        __UpperCAmelCase : int = do_normalize

    def UpperCAmelCase ( self : Optional[int] ) -> Dict:
        # kwargs dict used to construct the feature extractor under test
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCAmelCase ( self : Optional[int] , __lowercase : int=False , __lowercase : str=False ) -> int:
        # Build synthetic speech inputs; `equal_length`/`numpify` are the
        # presumed original flag names — TODO confirm upstream.
        def _flatten(__lowercase : Optional[Any] ):
            return list(itertools.chain(*__lowercase ) )

        if equal_length:
            __UpperCAmelCase : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __UpperCAmelCase : List[Any] = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __UpperCAmelCase : str = [np.asarray(__lowercase ) for x in speech_inputs]
        return speech_inputs
class a ( lowercase__ , unittest.TestCase ):
    """Test suite for the Wav2Vec2 feature extractor (padding, truncation, normalization).

    NOTE(review): anonymized dump — the base mixin `lowercase__` is presumably
    `SequenceFeatureExtractionTestMixin`, every test method was renamed to
    `UpperCAmelCase` (so later defs shadow earlier ones), and locals like
    `feat_extract`, `speech_inputs`, `input_values` were collapsed to
    `__UpperCAmelCase`. Confirm against the upstream test file.
    """

    # class under test, read by the mixin as `feature_extraction_class`
    a : Tuple = WavaVecaFeatureExtractor

    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # setUp: build the shared tester helper
        __UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractionTester(self )

    def UpperCAmelCase ( self : Optional[int] , __lowercase : Any ) -> List[str]:
        # helper: check per-feature zero mean / unit variance after normalization
        self.assertTrue(np.all(np.mean(__lowercase , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(__lowercase , axis=0 ) - 1 ) < 1e-3 ) )

    def UpperCAmelCase ( self : Dict ) -> Tuple:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __UpperCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __UpperCAmelCase : Optional[Any] = [np.asarray(__lowercase ) for speech_input in speech_inputs]
        # Test not batched input
        __UpperCAmelCase : int = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        __UpperCAmelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
        # Test batched
        __UpperCAmelCase : int = feat_extract(__lowercase , return_tensors="""np""" ).input_values
        __UpperCAmelCase : Optional[int] = feat_extract(__lowercase , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
            self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        __UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        __UpperCAmelCase : List[Any] = np.asarray(__lowercase )
        __UpperCAmelCase : int = feat_extract(__lowercase , return_tensors="""np""" ).input_values
        __UpperCAmelCase : Optional[Any] = feat_extract(__lowercase , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
            self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )

    def UpperCAmelCase ( self : Tuple ) -> Tuple:
        # normalization is applied per padding strategy ("longest"/"max_length"/"do_not_pad")
        __UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __UpperCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __UpperCAmelCase : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
        __UpperCAmelCase : Any = [None, 1600, None]
        for max_length, padding in zip(__lowercase , __lowercase ):
            __UpperCAmelCase : Optional[Any] = feat_extract(__lowercase , padding=__lowercase , max_length=__lowercase , return_tensors="""np""" )
            __UpperCAmelCase : Optional[int] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            # padded tail must stay (near) zero
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def UpperCAmelCase ( self : str ) -> List[Any]:
        # same as above but feeding plain Python lists of varying lengths
        __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __UpperCAmelCase : Optional[int] = range(800 , 1400 , 200 )
        __UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
        __UpperCAmelCase : Union[str, Any] = ["""longest""", """max_length""", """do_not_pad"""]
        __UpperCAmelCase : Dict = [None, 1600, None]
        for max_length, padding in zip(__lowercase , __lowercase ):
            __UpperCAmelCase : List[str] = feat_extract(__lowercase , max_length=__lowercase , padding=__lowercase )
            __UpperCAmelCase : str = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def UpperCAmelCase ( self : str ) -> Tuple:
        # truncation to max_length with "max_length" padding
        __UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __UpperCAmelCase : Dict = feat_extract(
            __lowercase , truncation=__lowercase , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
        __UpperCAmelCase : Any = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def UpperCAmelCase ( self : Dict ) -> Optional[int]:
        # "longest" padding caps the batch at max_length when shorter than the longest input
        __UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __UpperCAmelCase : Union[str, Any] = feat_extract(
            __lowercase , truncation=__lowercase , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
        __UpperCAmelCase : Dict = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        __UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __UpperCAmelCase : Optional[Any] = feat_extract(
            __lowercase , truncation=__lowercase , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
        __UpperCAmelCase : Dict = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    @require_torch
    def UpperCAmelCase ( self : str ) -> int:
        # padding must return float32 for both numpy and torch tensors
        import torch

        __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __UpperCAmelCase : Optional[Any] = np.random.rand(100 ).astype(np.floataa )
        __UpperCAmelCase : List[str] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __UpperCAmelCase : Union[str, Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __UpperCAmelCase : Tuple = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def UpperCAmelCase ( self : Tuple ) -> Dict:
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            __UpperCAmelCase : int = WavaVecaConfig.from_pretrained(__lowercase )
            __UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(__lowercase )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the dump assigned both the logger and the archive map to the same
# name `a`; the config class below calls `logger.warning`, so restore that name.
logger = logging.get_logger(__name__)
a : str = logger
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
a : Union[str, Any] = GIT_PRETRAINED_CONFIG_ARCHIVE_MAP
class GitVisionConfig(PretrainedConfig):
    """Configuration for the GIT vision encoder.

    NOTE(review): reconstructed from an anonymized dump (class named `a`,
    undefined base `lowercase__`, duplicate `__lowercase` parameters — a
    SyntaxError — and missing `self.` prefixes); parameter names are inferred
    from the defaults — confirm against the upstream file.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config; unwraps the `vision_config` key when given a full GIT config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class a ( lowercase__ ):
    """Configuration for the GIT model (text decoder plus vision encoder).

    Fixes vs. the obfuscated original: ``__init__`` previously reused one
    obfuscated name for every parameter (a SyntaxError) and bound values to
    throwaway locals instead of instance attributes; the ``model_type``
    attribute is restored (read via ``self.__class__.model_type`` below).
    NOTE(review): the base class ``lowercase__`` is obfuscated — presumably
    ``PretrainedConfig``; confirm.
    """

    model_type = 'git'

    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def UpperCAmelCase ( self ) -> int:
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 63 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase__ ( __lowerCamelCase : int ):
    """Return the Möbius value of ``__lowerCamelCase``: 1, -1, or 0.

    mu(n) is 0 when n has a squared prime factor, otherwise (-1)**k where k is
    the number of prime factors.
    """
    # Fix: the square-free and parity tests must inspect the prime
    # factorisation, not the raw input number.
    factors = prime_factors(__lowerCamelCase )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
    """Tokenizer test suite for BARThez (moussaKam/mbarthez).

    NOTE(review): this file's identifiers were machine-obfuscated. The four
    class attributes below all rebind the single name ``a`` (they were
    presumably ``tokenizer_class`` / ``rust_tokenizer_class`` / two booleans),
    and several methods bind values to throwaway ``__UpperCAmelCase`` locals
    and then read them back under original names (e.g. ``tokenizer`` in
    ``setUp``), so parts will not run as written. The base class
    ``lowercase__`` is presumably ``TokenizerTesterMixin``.
    """
    a : Optional[Any] = BarthezTokenizer
    a : Any = BarthezTokenizerFast
    a : Union[str, Any] = True
    a : Union[str, Any] = True
    def UpperCAmelCase ( self : Dict ) -> Any:
        # Download the fast tokenizer and save it into the test tmpdir in both
        # the new and the legacy formats.
        super().setUp()
        __UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
        __UpperCAmelCase : str = tokenizer
    def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # <pad> must map to id 1 and back.
        __UpperCAmelCase : Dict = """<pad>"""
        __UpperCAmelCase : Optional[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
    def UpperCAmelCase ( self : List[Any] ) -> str:
        # Sanity checks on the vocabulary layout and size.
        __UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(__lowercase ) , 101122 )
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
    @require_torch
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # Batched encoding with padding/truncation to pt tensors.
        __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
        __UpperCAmelCase : List[Any] = self.tokenizer(
            __lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        __UpperCAmelCase : int = batch.input_ids.tolist()[0]
        self.assertListEqual(__lowercase , __lowercase )
    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        # Python and Rust tokenizers must agree on tokenization and encoding.
        if not self.test_rust_tokenizer:
            return
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
        __UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
        __UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
        __UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
        __UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __UpperCAmelCase : Dict = self.get_rust_tokenizer()
        __UpperCAmelCase : str = tokenizer.encode(__lowercase )
        __UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    @slow
    def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        # Integration test against a pinned revision with a hard-coded encoding.
        # fmt: off
        __UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        __UpperCAmelCase : int = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` such that ``nums[i] + nums[j] == target``.

    ``nums`` must be sorted in ascending order (classic two-pointer scan).
    Returns an empty list when no pair sums to ``target``.

    Fix: the original def reused a single obfuscated name for both parameters
    (a SyntaxError) and was renamed away from ``two_pointer``, which the
    ``__main__`` guard in this module still calls.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # need a larger sum: advance the low pointer
        else:
            j -= 1  # need a smaller sum: retreat the high pointer
    return []
if __name__ == "__main__":
    # Doctests plus a small demo run.
    import doctest
    doctest.testmod()
    # NOTE(review): this calls ``two_pointer``; the def above must carry that
    # name (the obfuscation renamed it) for this line to work.
    print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 63 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Game-tree minimax over leaf values stored in ``scores``.

    depth: current depth (root is 0).
    node_index: index of the node within its level (children are 2i and 2i+1).
    is_max: True when the maximizing player is to move at this node.
    scores: leaf values, read when ``depth == height``.
    height: depth at which the leaves live (log2 of ``len(scores)``).

    Fix: the original def reused one obfuscated name for all five parameters
    (a SyntaxError); the name ``minimax`` is restored because the recursive
    calls and ``main`` below use it.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    # The player alternates at every level.
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)
def main():
    """Demo entry point: print the minimax value for a fixed score array.

    Fix: the obfuscation bound the scores and height to throwaway locals and
    passed an undefined name into ``minimax``; the def is renamed ``main``
    because the ``__main__`` guard below calls it by that name.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    # The maximizing player moves first at the root.
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    # Run doctests, then the demo.
    import doctest
    doctest.testmod()
    main()
| 63 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( lowercase__ ):
    """Seq2Seq trainer specialization that post-processes generated predictions
    before computing metrics (question-answering style evaluation).

    NOTE(review): identifiers in this file are machine-obfuscated. Both method
    signatures repeat the parameter name ``__lowercase`` (syntactically
    invalid), and several values are bound to throwaway ``__UpperCAmelCase``
    locals but read back under their original names (``gen_kwargs``,
    ``output``, ``compute_metrics``, ``metrics``, ``predictions``,
    ``total_batch_size``), so the methods will not run as written. The base
    class ``lowercase__`` is presumably ``SeqaSeqTrainer`` (imported above).
    """
    def __init__( self : str , *__lowercase : int , __lowercase : Optional[Any]=None , __lowercase : List[str]=None , **__lowercase : Union[str, Any] ) -> Union[str, Any]:
        # Stores the raw eval examples and the function that turns generated
        # token ids into metric-ready predictions.
        super().__init__(*__lowercase , **__lowercase )
        __UpperCAmelCase : Optional[Any] = eval_examples
        __UpperCAmelCase : int = post_process_function
    def UpperCAmelCase ( self : Dict , __lowercase : Optional[Dataset] = None , __lowercase : Union[str, Any]=None , __lowercase : Optional[List[str]] = None , __lowercase : str = "eval" , **__lowercase : str , ) -> Dict[str, float]:
        # Evaluation: run the prediction loop with metric computation disabled,
        # then post-process the generations and compute metrics exactly once.
        __UpperCAmelCase : List[Any] = gen_kwargs.copy()
        __UpperCAmelCase : Union[str, Any] = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
        )
        __UpperCAmelCase : int = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
        )
        __UpperCAmelCase : Union[str, Any] = gen_kwargs
        __UpperCAmelCase : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
        __UpperCAmelCase : str = self.get_eval_dataloader(__lowercase )
        __UpperCAmelCase : Optional[int] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        __UpperCAmelCase : Union[str, Any] = self.compute_metrics
        __UpperCAmelCase : List[str] = None
        __UpperCAmelCase : Union[str, Any] = time.time()
        __UpperCAmelCase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __UpperCAmelCase : Union[str, Any] = eval_loop(
                __lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowercase , metric_key_prefix=__lowercase , )
        finally:
            # Restore the metric function even if the loop raised.
            __UpperCAmelCase : Optional[int] = compute_metrics
        __UpperCAmelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __lowercase , __lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            __UpperCAmelCase : List[str] = self.post_process_function(__lowercase , __lowercase , __lowercase )
            __UpperCAmelCase : Dict = self.compute_metrics(__lowercase )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    __UpperCAmelCase : int = metrics.pop(__lowercase )
            metrics.update(output.metrics )
        else:
            __UpperCAmelCase : Dict = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(__lowercase )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        __UpperCAmelCase : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowercase )
        return metrics
    def UpperCAmelCase ( self : List[Any] , __lowercase : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=None , __lowercase : str = "test" , **__lowercase : List[str] ) -> Any:
        # Prediction: same structure as evaluation above, but returns a
        # PredictionOutput and skips logging/callbacks.
        __UpperCAmelCase : str = gen_kwargs.copy()
        __UpperCAmelCase : List[Any] = self.get_test_dataloader(__lowercase )
        # Temporarily disable metric computation, we will do it in the loop here.
        __UpperCAmelCase : List[Any] = self.compute_metrics
        __UpperCAmelCase : Optional[Any] = None
        __UpperCAmelCase : Optional[Any] = time.time()
        __UpperCAmelCase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __UpperCAmelCase : Any = eval_loop(
                __lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowercase , metric_key_prefix=__lowercase , )
        finally:
            __UpperCAmelCase : Dict = compute_metrics
        __UpperCAmelCase : int = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __lowercase , __lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        __UpperCAmelCase : Union[str, Any] = self.post_process_function(__lowercase , __lowercase , __lowercase , """predict""" )
        __UpperCAmelCase : Optional[Any] = self.compute_metrics(__lowercase )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                __UpperCAmelCase : List[str] = metrics.pop(__lowercase )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowercase )
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module constants were obfuscated to the single name ``a``.
# Names restored per the transformers convention — NOTE(review): confirm
# against the upstream openai-gpt configuration module.
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
    """OpenAI GPT model configuration.

    Fixes vs. the obfuscated original: ``model_type`` and ``attribute_map``
    class attributes are restored (both were bound to the name ``a``, and the
    presumed ``PretrainedConfig`` base reads them by name); ``__init__``
    previously reused one obfuscated name for every parameter (a SyntaxError)
    and bound values to throwaway locals instead of instance attributes.
    NOTE(review): the base class ``lowercase__`` is obfuscated — presumably
    ``PretrainedConfig`` (imported above); confirm.
    """

    model_type = 'openai-gpt'
    # Standard config attribute name -> GPT-specific attribute name.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 63 | 1 |
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]: after this loop, seive[n] is True
# iff n is prime (indices 0 and 1 stay True, but the helpers below only query
# values derived from numbers >= 2).
# Fix: the obfuscation bound both the table and the counter to the throwaway
# name ``a``, leaving ``seive`` and ``i`` (read by the loop body and by
# ``is_prime`` below) undefined.
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Primality lookup in the precomputed ``seive`` table (valid for 2 <= n <= 1_000_000).

    Fix: renamed to ``is_prime`` — the callers below use that name, and the
    obfuscated def name collided with every other function in this module.
    """
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    """Return True when the decimal representation of ``n`` contains an even digit.

    Fix: renamed to ``contains_an_even_digit`` — ``find_circular_primes`` calls
    it by that name, and the obfuscated def name shadowed its siblings.
    """
    return any(digit in "02468" for digit in str(n))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes up to ``limit`` (every digit rotation is prime).

    Fix: renamed to ``find_circular_primes`` (called by that name below) and
    the two obfuscated parameter/local references restored.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        # Any prime > 5 containing an even digit has a composite rotation,
        # so such candidates are skipped up front.
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            rotations = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(r) for r in rotations):
                result.append(num)
    return result
def lowerCamelCase__ ( ):
    """Return the count of circular primes below one million (Project Euler 35)."""
    return len(find_circular_primes() )
if __name__ == "__main__":
    # Print the Project Euler 35 answer when run directly.
    print(f"""{len(find_circular_primes()) = }""")
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast unit tests for KandinskyVaaInpaintPipeline with tiny dummy models.

    NOTE(review): identifiers were machine-obfuscated — the class attributes
    below all rebind the single name ``a`` (presumably ``pipeline_class``,
    ``params``, ``batch_params``, ``required_optional_params`` and a boolean),
    and all property/test methods share the name ``UpperCAmelCase`` so later
    defs shadow earlier ones; the suite will not run as written. The mixin
    base ``lowercase__`` is presumably ``PipelineTesterMixin``.
    """
    a : int = KandinskyVaaInpaintPipeline
    a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    a : Any = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    a : Any = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    a : List[Any] = False
    @property
    def UpperCAmelCase ( self : int ) -> Dict:
        # Text embedder hidden size used by the dummy UNet.
        return 32
    @property
    def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        return 32
    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        return self.time_input_dim
    @property
    def UpperCAmelCase ( self : str ) -> List[str]:
        return self.time_input_dim * 4
    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        return 100
    @property
    def UpperCAmelCase ( self : Dict ) -> Any:
        # Tiny UNet: 9 in-channels (latents + mask + masked image), doubled
        # out-channels because the model predicts mean and variance.
        torch.manual_seed(0 )
        __UpperCAmelCase : Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
        return model
    @property
    def UpperCAmelCase ( self : int ) -> int:
        # Kwargs for a tiny VQ movq image decoder.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def UpperCAmelCase ( self : Dict ) -> List[str]:
        torch.manual_seed(0 )
        __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        # Assemble the pipeline components (unet, scheduler, movq).
        __UpperCAmelCase : List[str] = self.dummy_unet
        __UpperCAmelCase : List[str] = self.dummy_movq
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
        __UpperCAmelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
        # Deterministic dummy inputs: image embeds, a 64x64 init image and a
        # mask with a single zeroed pixel.
        __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowercase )
        # create init_image
        __UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
        __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
        __UpperCAmelCase : List[str] = 0
        if str(__lowercase ).startswith("""mps""" ):
            __UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
        else:
            __UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __UpperCAmelCase : Optional[Any] = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Smoke test on CPU: pipeline output shape and a pinned slice of pixels.
        __UpperCAmelCase : Optional[Any] = """cpu"""
        __UpperCAmelCase : Dict = self.get_dummy_components()
        __UpperCAmelCase : str = self.pipeline_class(**__lowercase )
        __UpperCAmelCase : Tuple = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
        __UpperCAmelCase : Tuple = output.images
        __UpperCAmelCase : Optional[int] = pipe(
            **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
        __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase : Optional[Any] = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def UpperCAmelCase ( self : str ) -> Union[str, Any]:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 inpaint pipeline.

    NOTE(review): obfuscated locals — values are bound to throwaway names but
    read back as ``pipe_prior``, ``pipeline``, ``output`` etc., so this will
    not run as written.
    """
    def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Generate a "cat with hat" inpainting at 768x768 and compare against a
        # stored reference image.
        __UpperCAmelCase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __UpperCAmelCase : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
        __UpperCAmelCase : Optional[Any] = 0
        __UpperCAmelCase : Tuple = """a hat"""
        __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __UpperCAmelCase : int = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
            __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __UpperCAmelCase : Optional[int] = pipeline(
            image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __UpperCAmelCase : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Rail-fence (zigzag) encrypt ``input_string`` over ``key`` rails.

    Raises ValueError when ``key`` <= 0; returns the input unchanged when
    ``key`` is 1 or the string is no longer than ``key``.

    Fix: the original def reused one obfuscated name for both parameters (a
    SyntaxError) and shadowed the sibling ``decrypt``/brute-force functions;
    the upstream name ``encrypt`` is restored.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Invert the rail-fence cipher: recover the plaintext from ``input_string``.

    Raises ValueError when ``key`` <= 0; returns the input unchanged for key 1.

    Fix: the original def reused one obfuscated name for both parameters (a
    SyntaxError); the name ``decrypt`` is restored — the brute-force helper
    below calls it by that name.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    # Build the zigzag template to learn how many characters land on each rail.
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict:
    """Try every key from 1 to len-1 and return ``{key: decrypted_text}``.

    Fix: results were previously bound to a throwaway obfuscated local on each
    iteration instead of being stored in the dict; renamed to ``bruteforce``
    (upstream name) to stop shadowing ``encrypt``/``decrypt``.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Fix: every module-level constant below was obfuscated to the single name
# ``a``; names are restored from the sites that read them later in this file
# (``torch_cache_home``, ``default_cache_path``, ``PATH``,
# ``PYTORCH_PRETRAINED_BERT_CACHE``, ``PYTORCH_TRANSFORMERS_CACHE``) and from
# upstream conventions for the rest — NOTE(review): confirm the names with no
# visible readers (``_torch_available``, the URL prefixes) against upstream.
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory containing this file; config/label files live next to it.
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read Visual Genome class and attribute label files.

    Each line is expected to be comma-separated with the label first; returns
    ``(vg_classes, vg_attrs)`` as lowercase, stripped strings.

    Fix: the two parameters previously shared one obfuscated name (a
    SyntaxError); renamed to the upstream ``load_labels`` to stop shadowing
    the checkpoint loader below.
    """
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_ckp(checkpoint_path):
    """Load a pickled detectron-style checkpoint into an OrderedDict of torch tensors.

    Fixes: the obfuscation bound the dict and loop values to throwaway locals,
    leaving ``ckp`` and ``r`` undefined; ``isinstance(v, torch.tensor)`` is
    wrong — ``torch.tensor`` is a factory function, the type is
    ``torch.Tensor``. Renamed to the upstream ``load_ckp`` to stop shadowing
    ``load_labels``.
    NOTE: ``pkl.load`` executes arbitrary code — only load trusted checkpoints.
    """
    r = OrderedDict()
    with open(checkpoint_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class a :
    """Nested attribute-style configuration wrapper around a plain dict.

    Nested dicts become child ``Config`` objects, dotted keys can be assigned
    through ``__setattr__``, and instances can be dumped to / loaded from
    YAML or JSON.  ``from_pretrained``/``get_config_dict`` resolve a model
    name, local path or URL through the caching helpers in this module.

    NOTE(review): most assignments in this class target the throwaway name
    ``__UpperCAmelCase`` while later statements read ``self._name``,
    ``self._pointer``, ``pointer``, ``kwargs`` etc. — the bindings appear
    machine-renamed and several methods cannot run as written; compare
    against the upstream original before relying on them.
    """
    # Class-level (shared!) dict — presumably the default pointer store; verify.
    a : Dict = {}
    def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
        # Walk the input dictionary; nested dicts appear to be wrapped
        # recursively as child Config objects one level deeper.
        __UpperCAmelCase : List[str] = name
        __UpperCAmelCase : str = level
        __UpperCAmelCase : int = {}
        for k, v in dictionary.items():
            if v is None:
                # None values are rejected outright.
                raise ValueError()
            __UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
            __UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
            if isinstance(__lowercase , __lowercase ):
                __UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
                __UpperCAmelCase : Union[str, Any] = v
            setattr(self , __lowercase , __lowercase )
        __UpperCAmelCase : Any = d
    def __repr__( self : Optional[Any] ) -> Optional[int]:
        # Shown as the list of top-level keys.
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
        # Supports dotted keys ("encoder.hidden_size"): descend through child
        # Config objects and set the leaf on the right pointer dict.
        __UpperCAmelCase : int = val
        __UpperCAmelCase : List[str] = val
        __UpperCAmelCase : Union[str, Any] = key.split(""".""" )
        __UpperCAmelCase : List[Any] = len(__lowercase ) - 1
        __UpperCAmelCase : List[Any] = self._pointer
        if len(__lowercase ) > 1:
            for i, l in enumerate(__lowercase ):
                if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
                    setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
                if l == last_level:
                    __UpperCAmelCase : Union[str, Any] = val
                else:
                    __UpperCAmelCase : Union[str, Any] = pointer[l]
    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        # Accessor for the raw underlying dict.
        return self._pointer
    def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
        # Serialize to YAML at `file_name` (names resolved at runtime).
        with open(f"""{file_name}""" , """w""" ) as stream:
            dump(__lowercase , __lowercase )
    def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
        # Serialize to JSON at `file_name`.
        with open(f"""{file_name}""" , """w""" ) as stream:
            json.dump(__lowercase , __lowercase )
    @staticmethod
    def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
        # Load and return a YAML document from the given path.
        with open(__lowercase ) as stream:
            __UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
        return data
    def __str__( self : List[str] ) -> Tuple:
        # Pretty-print the tree, one indent unit `t` per nesting level.
        __UpperCAmelCase : Any = """ """
        if self._name != "root":
            __UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            __UpperCAmelCase : List[Any] = """"""
        __UpperCAmelCase : Optional[Any] = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(__lowercase , __lowercase ):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
            __UpperCAmelCase : int = level
        return r[:-1]
    @classmethod
    def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
        # Build a Config from a pretrained name/path via get_config_dict.
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
        return cls(__lowercase )
    @classmethod
    def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
        # Resolve the config file (local dir/file or remote URL), fetch it
        # through the shared cache, and return (yaml_dict, leftover_kwargs).
        __UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
        __UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
        __UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
        __UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
        if os.path.isdir(__lowercase ):
            __UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
        elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
            __UpperCAmelCase : Tuple = pretrained_model_name_or_path
        else:
            __UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
        try:
            # Load from URL or cache if already cached
            __UpperCAmelCase : Optional[int] = cached_path(
                __lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
        except EnvironmentError:
            __UpperCAmelCase : str = """Can't load config for"""
            raise EnvironmentError(__lowercase )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( model_id , filename , use_cdn=True ):
    """Build the download URL for `filename` of model `model_id`.

    Args:
        model_id: model identifier; ids without "/" use the legacy flat layout.
        filename: file to fetch (e.g. "pytorch_model.bin").
        use_cdn: pick the CloudFront CDN endpoint over the raw S3 bucket.

    Returns:
        Fully-qualified URL string.
    """
    # BUG FIX: the original declared three parameters with the same name (a
    # SyntaxError) and returned the literal "(unknown)" where the requested
    # filename belongs — that reads as a scrubbed placeholder; restored per
    # the upstream hf_bucket_url helper.
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def lowerCamelCase__ ( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    """Stream `url` into the open binary file `temp_file` with a progress bar.

    Sends a Range header to resume a partial download at `resume_size`; a 416
    (range not satisfiable) response means the file is already complete.

    Args:
        url: remote file to download.
        temp_file: writable binary file object receiving the content.
        proxies: optional requests-style proxy mapping.
        resume_size: byte offset already present locally.
        user_agent: extra UA info — a dict of key/value pairs or a string.
    """
    # BUG FIX: the original bound every local (ua, headers, response, total,
    # progress) to the same clobbered name and never actually set the Range
    # header; parameter names were also duplicated (SyntaxError).
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowerCamelCase__ ( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    """Resolve `url` to a local file inside the cache, downloading if needed.

    Behavior mirrors the classic transformers file cache:
      * HEAD the URL for an ETag (skipped when `local_files_only`).
      * With no ETag (offline / inaccessible), fall back to any previously
        cached copy matching the URL hash.
      * Otherwise download under a `.lock` file, optionally resuming into a
        `.incomplete` file, then atomically move into place and write a
        sidecar `<file>.json` with the url/etag metadata.

    Returns:
        The cache path, or None when offline with nothing cached.

    NOTE(review): parameter and local names were restored from the call sites
    and the upstream helper; the original block declared duplicate parameter
    names and assigned every local to the same clobbered name.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            # BUG FIX: print() does not interpolate %s args; format explicitly.
            print(
                "{} not found in cache or force_download set to True, downloading to {}".format(url, temp_file.name)
            )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def lowerCamelCase__ ( url , etag=None ):
    """Convert a URL (and optional ETag) into a deterministic cache filename.

    The name is ``sha256(url)`` in hex, plus ``.sha256(etag)`` when an ETag is
    given, plus a trailing ``.h5`` marker when the URL itself ends in ``.h5``.

    Args:
        url: the remote file URL.
        etag: optional ETag string distinguishing file versions.

    Returns:
        Filename string safe to use inside the cache directory.
    """
    # BUG FIX: the original declared two parameters with the same name (a
    # SyntaxError) and never bound `url_hash`/`filename`/`etag_hash`.
    url_bytes = url.encode("utf-8")
    url_hash = shaaaa(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = shaaaa(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    """Resolve a URL or local path to a local file, optionally extracting archives.

    Remote URLs are fetched through ``get_from_cache``; existing local paths
    are returned as-is. With ``extract_compressed_file``, zip/tar archives are
    unpacked next to the archive into ``<name-with-dots-dashed>-extracted``
    (guarded by a ``.lock`` file) and that directory is returned instead.

    Raises:
        EnvironmentError: missing local file or unidentifiable archive format.
        ValueError: argument is neither a URL nor a local path.

    NOTE(review): parameter and local names restored from the upstream helper;
    the original declared duplicate parameter names and clobbered all locals.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies,
            resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def lowerCamelCase__ ( query , delim="," ):
    """Fetch data from a local file path or a URL and best-effort parse it.

    Local files and decodable remote bodies are parsed with ``eval``; remote
    responses try JSON first, then ``eval``, then a line split.

    SECURITY NOTE: this calls eval() on file/remote content — only ever use it
    with fully trusted sources.

    Args:
        query: local path or URL (must be a str).
        delim: kept for interface compatibility; currently unused.

    Returns:
        The parsed object (list/dict/…) or a list of lines.
    """
    # BUG FIX: the original declared duplicate parameter names, checked
    # isinstance(x, x) and called requests.json() (the module) instead of
    # req.json(); locals were also clobbered.
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())  # trusted local demo data only
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
    """Download an image from a URL and return it as a numpy array.

    Args:
        __lowerCamelCase: image URL.

    Returns:
        numpy array decoded by PIL — presumably (H, W, C); TODO confirm.
    """
    # BUG FIX: the original clobbered `response`/`img` into throwaway names.
    response = requests.get(__lowerCamelCase)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Download (if needed) and load a pickled FRCNN checkpoint as torch tensors.

    The file is fetched with wget into the current directory unless already
    present. For every BatchNorm ``running_var`` entry an extra zero-valued
    ``num_batches_tracked`` tensor is synthesized, as PyTorch expects.

    Args:
        __lowerCamelCase: URL (or bare filename already in the cwd).

    Returns:
        dict mapping parameter names to ``torch.Tensor``.
    """
    # BUG FIX: the original clobbered all locals and re-opened the URL string
    # instead of the downloaded local file name.
    fn = __lowerCamelCase.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(__lowerCamelCase)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k_new = k.replace("running_var", "num_batches_tracked")
            new[k_new] = zero
    return new
def lowerCamelCase__ ( ):
    """Print the absolute path of the demo notebook shipped next to this package."""
    # BUG FIX: the original referenced `__lowerCamelCase`, which is not defined
    # in this zero-argument function; the path is anchored at the module-level
    # PATH constant (this file's directory).
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowerCamelCase__ ( im , input_format="RGB" ):
    """Read an image from disk or URL into a numpy array for the FRCNN demo.

    OpenCV reads BGR; the array is converted with cvtColor and, when the
    requested ``input_format`` is "RGB", the channel axis is reversed again —
    presumably because the downstream model expects BGR; TODO confirm.

    Args:
        im: local path or URL (must be a str).
        input_format: "RGB" or anything else to keep the cvtColor output.

    Returns:
        numpy image array.
    """
    # BUG FIX: the original declared duplicate parameter names and clobbered
    # the `img` local.
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__ ( images , batch=1 ):
    """Yield successive `batch`-sized slices of `images` as a generator."""
    # BUG FIX: the original declared two parameters with the same name (a
    # SyntaxError) and read the undefined names `images`/`batch` in the body.
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 63 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Encode a lowercase alphabetic string as a list of 1-based alphabet positions.

    'a' -> 1, 'b' -> 2, ..., 'z' -> 26.
    """
    # BUG FIX: ord() must be applied to each character, not to the whole
    # input string (which raises TypeError for len != 1).
    return [ord(elem) - 96 for elem in __lowerCamelCase]
def lowerCamelCase__ ( __lowerCamelCase : list[int] ):
    """Decode a list of 1-based alphabet positions back into a lowercase string.

    1 -> 'a', 2 -> 'b', ..., 26 -> 'z'.
    """
    # BUG FIX: the original iterated the undefined name `encoded` instead of
    # the parameter.
    return "".join(chr(elem + 96) for elem in __lowerCamelCase)
def lowerCamelCase__ ( ):
    """Interactive driver: read a line from stdin, print its numeric encoding
    and the round-trip decoding."""
    # NOTE(review): `encode`, `decode` and `main` are not defined under those
    # names in this file (the helpers above are both named `lowerCamelCase__`),
    # and the printed name `__lowerCamelCase` is unbound in this scope — this
    # driver raises NameError as written; verify against the original module.
    __UpperCAmelCase : Any = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , __lowerCamelCase )
    print("""Decoded:""" , decode(__lowerCamelCase ) )
# Script entry point (see NOTE above: `main` is not defined here).
if __name__ == "__main__":
    main()
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
    """Builds a tiny random RobertaConfig plus matching dummy inputs for the
    Flax Roberta tests below.

    NOTE(review): the constructor's parameters are all named ``__lowercase``
    (duplicate argument names, a SyntaxError) and its assignments target
    ``__UpperCAmelCase`` while the methods read ``self.batch_size`` etc. —
    the names appear machine-renamed and the class cannot run as written;
    verify against the upstream FlaxRobertaModelTester.
    """
    def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
        # Store the tiny-model hyper-parameters used by the builders below.
        __UpperCAmelCase : Dict = parent
        __UpperCAmelCase : Any = batch_size
        __UpperCAmelCase : str = seq_length
        __UpperCAmelCase : Tuple = is_training
        __UpperCAmelCase : List[Any] = use_attention_mask
        __UpperCAmelCase : Dict = use_token_type_ids
        __UpperCAmelCase : Optional[int] = use_labels
        __UpperCAmelCase : Tuple = vocab_size
        __UpperCAmelCase : List[Any] = hidden_size
        __UpperCAmelCase : Optional[Any] = num_hidden_layers
        __UpperCAmelCase : Any = num_attention_heads
        __UpperCAmelCase : List[Any] = intermediate_size
        __UpperCAmelCase : Any = hidden_act
        __UpperCAmelCase : Any = hidden_dropout_prob
        __UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        __UpperCAmelCase : Dict = max_position_embeddings
        __UpperCAmelCase : List[str] = type_vocab_size
        __UpperCAmelCase : str = type_sequence_label_size
        __UpperCAmelCase : List[str] = initializer_range
        __UpperCAmelCase : Tuple = num_choices
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        # Random ids + optional attention/token-type masks + a RobertaConfig.
        __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase : Optional[Any] = None
        if self.use_attention_mask:
            __UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCAmelCase : List[Any] = None
        if self.use_token_type_ids:
            __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCAmelCase : Optional[Any] = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def UpperCAmelCase ( self : Tuple ) -> List[Any]:
        # Repack the prepared inputs into the dict shape the common tests use.
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def UpperCAmelCase ( self : Any ) -> List[str]:
        # Decoder variant: adds encoder hidden states and attention mask.
        __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
        __UpperCAmelCase : int = True
        __UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a ( lowercase__ , unittest.TestCase ):
    """Test-suite wiring for the Flax Roberta heads: declares the model classes
    under test and a slow smoke-test loading roberta-base from PyTorch weights.

    NOTE(review): the base class ``lowercase__`` and the helper
    ``FlaxRobertaModelTester`` are not defined under those names in this file
    (the tester class above is named ``a``) — verify against the upstream
    test module.
    """
    # Class-level flags/collections consumed by the shared Flax tester mixin;
    # the boolean's meaning is obscured by renaming — TODO confirm upstream.
    a : Optional[Any] = True
    a : List[Any] = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Shared model-tester fixture.
        __UpperCAmelCase : List[str] = FlaxRobertaModelTester(self )
    @slow
    def UpperCAmelCase ( self : str ) -> List[Any]:
        # Load every head from pretrained PyTorch weights and run one forward.
        for model_class_name in self.all_model_classes:
            __UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=__lowercase )
            __UpperCAmelCase : int = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__lowercase )
| 63 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Any , __lowercase : int , __lowercase : Optional[int]=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : int=True , __lowercase : Optional[int]=True , __lowercase : Optional[Any]=True , __lowercase : Tuple=99 , __lowercase : Any=32 , __lowercase : List[Any]=5 , __lowercase : Optional[int]=4 , __lowercase : Dict=37 , __lowercase : List[Any]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Union[str, Any]=0.1 , __lowercase : str=128 , __lowercase : List[str]=32 , __lowercase : Union[str, Any]=16 , __lowercase : Optional[int]=2 , __lowercase : Optional[int]=0.02 , __lowercase : Optional[Any]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> List[str]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : str = use_input_mask
__UpperCAmelCase : str = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : str = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : int = scope
def UpperCAmelCase ( self : Tuple ) -> Any:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_input_mask:
__UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : int = None
if self.use_token_type_ids:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Dict = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self : Optional[int] ) -> int:
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Any = self.prepare_config_and_inputs()
__UpperCAmelCase : Any = True
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Dict , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Optional[Any] ) -> Any:
__UpperCAmelCase : Dict = NezhaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
__UpperCAmelCase : Union[str, Any] = model(__lowercase , token_type_ids=__lowercase )
__UpperCAmelCase : List[Any] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self : str , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : str , __lowercase : int , __lowercase : Dict , __lowercase : Dict , __lowercase : Tuple , __lowercase : Tuple , ) -> Tuple:
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Union[str, Any] = NezhaModel(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__UpperCAmelCase : Optional[int] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , )
__UpperCAmelCase : Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : str ) -> Dict:
__UpperCAmelCase : int = NezhaForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int] ) -> List[str]:
__UpperCAmelCase : int = NezhaForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : str = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase ( self : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : str , __lowercase : Dict , __lowercase : Dict , __lowercase : Optional[int] ) -> Any:
__UpperCAmelCase : Any = NezhaForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase ( self : int , __lowercase : int , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Any ) -> str:
__UpperCAmelCase : Tuple = NezhaForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : List[Any] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.num_labels
__UpperCAmelCase : List[str] = NezhaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Tuple , __lowercase : int , __lowercase : str , __lowercase : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : int ) -> Optional[int]:
__UpperCAmelCase : Any = self.num_labels
__UpperCAmelCase : Tuple = NezhaForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Build a multiple-choice head, tile inputs per choice, and check logits.

    Renamed to match the ``create_and_check_for_multiple_choice`` call site
    below; fixes duplicated ``__lowercase`` parameters and unbound locals.
    """
    config.num_choices = self.num_choices
    model = NezhaForMultipleChoice(config=config)
    model.to(torch_device)  # NOTE(review): device target restored from upstream — confirm
    model.eval()
    # Each (batch, seq) tensor is expanded to (batch, num_choices, seq).
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` for the common model tests.

    Renamed to match the ``self.model_tester.prepare_config_and_inputs_for_common()``
    call site below. The original destructured all seven values into the single
    name ``__UpperCAmelCase`` and then read ``input_ids`` etc., which were unbound.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
    """Common model / pipeline tests for the Nezha architecture.

    NOTE(review): the original class and base names were mangled by a refactoring
    tool. Method and attribute names below were restored to match their call
    sites in this file (``self.all_model_classes``, ``self._prepare_for_class``,
    ``self.model_tester``) and upstream transformers — confirm against upstream.
    """

    # All Nezha model classes exercised by the common tests
    # (read via ``self.all_model_classes`` in test_torchscript_device_change).
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-tag -> model-class mapping for the pipeline mixin.
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): original flag name lost; upstream uses fx_compatible — confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input preparation with dummy pretraining labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): the mapping passed to get_values was lost in the
            # refactor; MODEL_FOR_PRETRAINING_MAPPING matches the labels built
            # below per upstream — confirm.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        # Must run before every test: the methods below read self.model_tester.
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, save, and reload on GPU to verify device portability."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class a ( unittest.TestCase ):
    """Integration tests against the released ``sijunhe/nezha-cn-base`` weights.

    NOTE(review): test method names were restored from upstream transformers
    (the mangled file named both ``UpperCAmelCase``, so the second shadowed the
    first and only one ran) — confirm intended names.
    """

    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values pinned from a known-good run of the released model.
        expected_slice = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))  # vocab-sized LM logits
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class a ( lowercase__ ):
    """Names of the supported learning-rate schedule types.

    NOTE(review): the factory-lookup code later in this module refers to this
    enum as ``SchedulerType``, and all members below share the mangled name
    ``a`` (so only the last binding survives) — the class and member names
    appear to have been destroyed by a refactoring tool; confirm the intended
    identifiers (LINEAR, COSINE, ... per upstream).
    """

    # String values are the user-facing scheduler names accepted by get_scheduler.
    a : Tuple = 'linear'
    a : int = 'cosine'
    a : Optional[Any] = 'cosine_with_restarts'
    a : Dict = 'polynomial'
    a : Tuple = 'constant'
    a : Dict = 'constant_with_warmup'
    a : Any = 'piecewise_constant'
def lowerCamelCase__ ( optimizer: Optimizer, last_epoch: int = -1 ):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: the wrapped optimizer.
        last_epoch: index of the last epoch when resuming training.

    The original signature named both parameters ``__lowerCamelCase``, which is
    a SyntaxError; names restored from the keyword call sites below.
    """
    # Multiplicative factor is always 1, so the base LR is kept unchanged.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def lowerCamelCase__ ( optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1 ):
    """Constant LR after a linear warmup over ``num_warmup_steps`` steps.

    Fixes the duplicated ``__lowerCamelCase`` parameter names (SyntaxError) and
    the unbound names inside the closure.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to 1 during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def lowerCamelCase__ ( optimizer: Optimizer, step_rules: str, last_epoch: int = -1 ):
    """Piecewise-constant LR multiplier parsed from a rule string.

    ``step_rules`` looks like ``"10:0.5,20:0.1,1.0"``: multiplier 0.5 before
    step 10, 0.1 before step 20, then 1.0 (the trailing value) afterwards.

    Fixes the mangled original, which bound the rules dict, the parsed values
    and ``sorted_steps`` to ``__UpperCAmelCase`` and then read unbound names.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps, value = rule_str.split(":")
        steps = int(steps)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past every boundary: fall back to the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def lowerCamelCase__ ( optimizer, num_warmup_steps, num_training_steps, last_epoch=-1 ):
    """Linear warmup followed by a linear decay to 0 at ``num_training_steps``.

    Parameter names restored (the original repeated ``__lowerCamelCase``, a
    SyntaxError) to match the keyword forwarding in ``get_scheduler``.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly from 1 at the end of warmup to 0 at the final step.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__ ( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ):
    """Linear warmup, then cosine decay over ``num_cycles`` half-waves.

    Parameter names restored from the keyword forwarding in ``get_scheduler``;
    the original signature repeated ``__lowerCamelCase`` and the closure read
    an unbound ``progress``.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the post-warmup schedule that has elapsed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__ ( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ):
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts.

    Parameter names restored (original signature repeated ``__lowerCamelCase``)
    and the unbound ``progress`` local fixed.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` makes the cosine restart from its peak at each cycle boundary.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCamelCase__ ( optimizer, num_warmup_steps, num_training_steps, lr_end=1E-7, power=1.0, last_epoch=-1 ):
    """Linear warmup, then polynomial decay from the base LR to ``lr_end``.

    Parameter names restored (original signature repeated ``__lowerCamelCase``)
    and the unbound ``lr_init``/``lr_range``/``decay_steps``/``pct_remaining``/
    ``decay`` locals fixed; also fixes the doubled word in the error message.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each scheduler-type enum member to its factory function.
# NOTE(review): none of the names referenced here (SchedulerType,
# get_linear_schedule_with_warmup, ...) are defined under those names in this
# file — the enum above is named ``a`` and every factory was renamed to
# ``lowerCamelCase__`` by a refactoring tool, so evaluating this dict raises
# NameError. Confirm and restore the intended bindings from upstream.
a : int = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ (
    name: "Union[str, SchedulerType]",
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build the scheduler named by ``name`` for ``optimizer``.

    Raises:
        ValueError: when a required argument for the chosen schedule is missing.

    Parameter names restored (the original repeated ``__lowerCamelCase`` eight
    times — a SyntaxError) so the keyword forwarding below passes the intended
    values instead of one shared name.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 | 1 |
def lowerCamelCase__ ( sorted_collection, item ):
    """Iterative interpolation search.

    Args:
        sorted_collection: ascending-sorted sequence of comparable numbers.
        item: the value to locate.

    Returns:
        The index of ``item``, or ``None`` when it is absent.

    Fixes the mangled original in which ``left``/``right``/``point``/
    ``current_item`` were bound to ``__UpperCAmelCase`` and therefore unbound.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        # Estimate the position by linear interpolation between the bounds.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def lowerCamelCase__ ( sorted_collection, item, left, right ):
    """Recursive interpolation search over ``sorted_collection[left:right + 1]``.

    Returns the index of ``item`` or ``None``. The mangled original recursed
    via the name ``interpolation_search_by_recursion``, which this definition
    never binds; the recursion is rewritten onto an inner helper so it cannot
    be broken by module-level rebinding.
    """

    def _search(left, right):
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        if sorted_collection[point] == item:
            return point
        elif point < left:
            return _search(point, left)
        elif point > right:
            return _search(right, left)
        else:
            if sorted_collection[point] > item:
                return _search(left, point - 1)
            else:
                return _search(point + 1, right)

    return _search(left, right)
def lowerCamelCase__ ( collection ):
    """Return True when ``collection`` is ascending-sorted; raise otherwise.

    Raises:
        ValueError: when the collection is not ascending-sorted.

    The mangled original named the parameter ``__lowerCamelCase`` while the
    body read ``collection``; the parameter name is restored (callers pass it
    positionally).
    """
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    # Demo driver. The mangled original bound the flag and the inputs to the
    # throwaway name ``a`` and then read ``debug``/``collection``/``target``,
    # which were unbound; ``collection`` is now defined unconditionally so the
    # search below works regardless of the debug flag.
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            # NOTE(review): the validator above was renamed by a refactoring
            # tool; confirm the intended `__assert_sorted` binding.
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    # NOTE(review): `interpolation_search` was also renamed — confirm binding.
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
| 63 |
from math import pi, sqrt
def lowerCamelCase__ ( num: float ) -> float:
    """Gamma function for positive integers and half-integers.

    Args:
        num: a positive integer or half-integer (e.g. 0.5, 1, 2.5).

    Raises:
        ValueError: when ``num <= 0``.
        OverflowError: when ``num > 171.5`` (the result overflows a float).
        NotImplementedError: when ``num`` is not an integer or half-integer.

    The mangled original named the parameter ``__lowerCamelCase`` while the
    body read ``num``, returned ``sqrt(<param>)`` instead of ``sqrt(pi)`` for
    the Γ(1/2) base case, and recursed via the unbound name ``gamma``.
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 1_7_1.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")

    def _gamma(x: float) -> float:
        # Gamma(0.5) = sqrt(pi); Gamma(x) = (x - 1) * Gamma(x - 1)
        if x == 0.5:
            return sqrt(pi)
        return 1.0 if x == 1 else (x - 1) * _gamma(x - 1)

    return _gamma(num)
def lowerCamelCase__ ( ):
    """Sanity checks for the gamma function: Γ(1/2)=√π, Γ(1)=Γ(2)=1.

    Fixes the mangled ``sqrt(__lowerCamelCase)`` (this function has no
    parameters, so that name was unbound) — the correct expected value is
    ``sqrt(pi)``. NOTE(review): ``gamma`` is not defined under that name in
    this file (the implementation above was renamed); confirm the intended
    target.
    """
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive loop: repeatedly evaluate gamma until the user enters 0
    # (0.0 is falsy, ending the `while num:` loop). The mangled original bound
    # the sentinel to `a` and then read the unbound name `num`.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        # NOTE(review): `gamma` was renamed by the refactoring tool — confirm.
        print(f"""gamma({num}) = {gamma(num)}""")
        print("\nEnter 0 to exit...")
| 63 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
    """Fast CPU tests for LDMTextToImagePipeline built from tiny components.

    NOTE(review): attribute and method names below were restored to match the
    internal call sites (``self.get_dummy_components`` / ``self.get_dummy_inputs``)
    and upstream diffusers; the refactoring tool had renamed everything to
    ``a`` / ``UpperCAmelCase`` so methods shadowed each other — confirm.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        'negative_prompt',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): original name of this flag was lost in the refactor — confirm.
    a : List[str] = False

    def get_dummy_components(self):
        """Build tiny, seeded pipeline components for deterministic tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        # NOTE(review): the boolean flags below were mangled to `__lowercase`;
        # False matches upstream — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-0_5,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call kwargs for the given device/seed."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        """Two-step generation on CPU; pin the output shape and a pixel slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow GPU integration tests against the released ldm-text2im-large-256.

    NOTE(review): method names restored from call sites / upstream diffusers;
    in the mangled original all methods were named ``UpperCAmelCase``, so the
    tearDown cleanup never ran and ``self.get_inputs`` did not exist.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with pre-generated latents.

        NOTE(review): the original dtype default was mangled to the nonexistent
        ``torch.floataa``; float32 matches upstream — confirm.
        """
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        """Three-step generation; pin the output shape and a pixel slice."""
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
    """Nightly GPU test: full 50-step generation compared against a stored image.

    NOTE(review): method names restored from call sites / upstream diffusers;
    the mangled original named every method ``UpperCAmelCase``, so tearDown
    never ran and ``self.get_inputs`` did not exist.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with pre-generated latents.

        NOTE(review): dtype default was mangled to the nonexistent
        ``torch.floataa``; float32 matches upstream — confirm.
        """
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_text2img(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        # Reference image produced by a known-good run, stored alongside the test arrays.
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a:
    """A binary-tree node: integer payload plus optional left/right children.

    NOTE(review): every function below annotates its parameter as ``Node``,
    which strongly suggests this class was originally named ``Node`` and its
    fields ``data``/``left``/``right`` — as written, all three fields share the
    mangled name ``a`` (later annotations override earlier ones, leaving a
    single field), so the tree builders in this file cannot work. Confirm and
    restore the intended names.
    """

    # payload; the annotations below resolve lazily via `from __future__ import annotations`
    a : int
    a : Node | None = None
    a : Node | None = None
def lowerCamelCase__ ( ):
    """Build the sample tree  1(2(4, 5), 3)  used by the demo below.

    The mangled original created five unconnected nodes (all bound to
    ``__UpperCAmelCase``) and then returned the unbound name ``tree``; the
    intended parent/child wiring is restored. NOTE(review): ``Node`` is not
    defined under that name in this file (the dataclass above was renamed to
    ``a``) — confirm the intended binding.
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def lowerCamelCase__ ( root: "Node | None" ):
    """Pre-order traversal (root, left, right) as a flat list of payloads.

    The mangled original recursed via the name ``preorder``, which this
    definition never binds; the recursion is moved onto an inner helper.
    """

    def _pre(node):
        return [node.data, *_pre(node.left), *_pre(node.right)] if node else []

    return _pre(root)
def lowerCamelCase__ ( root: "Node | None" ):
    """Post-order traversal (left, right, root) as a flat list of payloads.

    The mangled original recursed via the unbound name ``postorder``; the
    recursion is moved onto an inner helper.
    """

    def _post(node):
        return _post(node.left) + _post(node.right) + [node.data] if node else []

    return _post(root)
def lowerCamelCase__ ( root: "Node | None" ):
    """In-order traversal (left, root, right) as a flat list of payloads.

    The mangled original recursed via the unbound name ``inorder``; the
    recursion is moved onto an inner helper.
    """

    def _in(node):
        return [*_in(node.left), node.data, *_in(node.right)] if node else []

    return _in(root)
def lowerCamelCase__ ( root: "Node | None" ):
    """Height of the tree (number of levels); an empty tree has height 0.

    The mangled original recursed via the unbound name ``height``; the
    recursion is moved onto an inner helper.
    """

    def _height(node):
        return (max(_height(node.left), _height(node.right)) + 1) if node else 0

    return _height(root)
def lowerCamelCase__ ( root: "Node | None" ):
    """Breadth-first (level-order) traversal as a flat list of payloads.

    Fixes the mangled original, which bound the queue and the popped node to
    ``__UpperCAmelCase`` and then read the unbound names ``process_queue``
    and ``node``.
    """
    output: list = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def lowerCamelCase__ ( root: "Node | None", level: int ):
    """Left-to-right payloads of the nodes at the given 1-based level.

    Fixes the mangled original, which bound the accumulator to
    ``__UpperCAmelCase`` and then appended to the unbound name ``output``.
    """
    output: list = []

    def populate_output(node, level):
        if not node:
            return
        if level == 1:
            output.append(node.data)
        elif level > 1:
            populate_output(node.left, level - 1)
            populate_output(node.right, level - 1)

    populate_output(root, level)
    return output
def lowerCamelCase__ ( root: "Node | None", level: int ):
    """Right-to-left payloads of the nodes at the given 1-based level.

    Mirror of the left-to-right getter (right subtree visited first). Fixes
    the mangled original's unbound ``output`` accumulator.
    """
    output: list = []

    def populate_output(node, level):
        if node is None:
            return
        if level == 1:
            output.append(node.data)
        elif level > 1:
            populate_output(node.right, level - 1)
            populate_output(node.left, level - 1)

    populate_output(root, level)
    return output
def lowerCamelCase__ ( root: "Node | None" ):
    """ZigZag level order: alternate left-to-right / right-to-left per level.

    Fixes the mangled original, in which ``output``/``flag``/``height_tree``
    were bound to ``__UpperCAmelCase`` and therefore unbound. NOTE(review):
    the sibling helpers ``height``, ``get_nodes_from_left_to_right`` and
    ``get_nodes_from_right_to_left`` are not defined under those names in this
    file (they were renamed by the refactoring tool) — confirm bindings.
    """
    if root is None:
        return []
    output: list = []
    flag = 0  # 0 -> emit this level left-to-right, 1 -> right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def lowerCamelCase__ ( ):  # Main function for testing.
    """Run every traversal on the sample tree and print the results.

    Fixes the mangled original, which bound the tree to ``__UpperCAmelCase``
    and then interpolated the unbound name ``__lowerCamelCase`` in every
    f-string. NOTE(review): the helpers called here (``make_tree``,
    ``inorder``, ..., ``main`` in the guard) were all renamed to
    ``lowerCamelCase__`` by the refactoring tool — confirm bindings.
    """
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree)}""")
    print(f"""Pre-order Traversal: {preorder(tree)}""")
    print(f"""Post-order Traversal: {postorder(tree)}""", """\n""")
    print(f"""Height of Tree: {height(tree)}""", """\n""")
    print("""Complete Level Order Traversal: """)
    print(level_order(tree), """\n""")
    print("""Level-wise order Traversal: """)
    for level in range(1, height(tree) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(tree, level=level))
    print("""\nZigZag order Traversal: """)
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( cp ):
    """Return True when code point ``cp`` falls in a CJK ideograph block.

    The mangled original named the parameter ``__lowerCamelCase`` while the
    body read ``cp``, so every call raised NameError; callers pass the value
    positionally, so restoring the name is the fix.
    """
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def lowerCamelCase__ ( word ):
    """Return 1 when every character of ``word`` is a CJK char, else 0.

    Fixes the mangled original, which named the parameter ``__lowerCamelCase``
    while the loop iterated the unbound name ``word``. NOTE(review): the
    helper ``_is_chinese_char`` is not defined under that name in this file
    (it was renamed by the refactoring tool) — confirm the binding.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def lowerCamelCase__ ( tokens ):
    """Collect the multi-character tokens that are entirely Chinese.

    Fixes the mangled original, which bound the set to ``__UpperCAmelCase``
    and then read the unbound names ``word_set``/``word_list`` and passed the
    wrong values into ``len``/``is_chinese``. NOTE(review): ``is_chinese`` is
    not defined under that name in this file — confirm the binding.
    """
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def lowerCamelCase__ ( bert_tokens, chinese_word_set ):
    """Prefix ``##`` onto BERT sub-tokens that continue a whole Chinese word.

    Args:
        bert_tokens: list of single BERT tokens (mutated in place and returned).
        chinese_word_set: whole words detected by the word segmenter.

    Fixes the mangled original: duplicated ``__lowerCamelCase`` parameters
    (a SyntaxError) and every local (``max_word_len``, ``bert_word``,
    ``start``/``end``, ``whole_word``, ``single_word``) bound to
    ``__UpperCAmelCase`` while read under its real name. NOTE(review):
    ``is_chinese`` is not defined under that name in this file — confirm.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate word first, shrinking down to 2 chars.
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowerCamelCase__ ( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    """For each input line, list the positions of '##' sub-tokens that sit
    inside an LTP-segmented Chinese word (whole-word-masking reference ids).

    Note: the original signature declared the same parameter name three times
    (a SyntaxError); distinct names restore the intended interface.
    """
    # LTP word segmentation, batched by 100 lines to bound memory use.
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    # BERT tokenization with special tokens and truncation (the original passed
    # an undefined name instead of True for both flags), same batching.
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def lowerCamelCase__ ( args ):
    """Build whole-word-masking reference ids for a Chinese text file.

    Reads args.file_name line by line, segments words with LTP, tokenizes with
    a BERT tokenizer, and writes one JSON list of '##'-subword positions per
    input line to args.save_path.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    # Drop empty/whitespace-only lines; the original comprehension tested
    # len() of the parameter instead of each line. Avoid delimiter like '\u2029'.
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    # The original bound the parser and parsed args to a variable `a` while
    # reading `parser`/`args` — both were NameErrors. Bind consistent names.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
    """Tokenizer test suite for GPTSanJapaneseTokenizer.

    Builds a tiny vocabulary + emoji table in a temp dir, then exercises
    tokenize/encode/decode, token-type ids, prefix-text handling, and batch
    encoding against the ``Tanrei/GPTSAN-japanese`` checkpoint (``@slow``).

    NOTE(review): throughout this class, class attributes are all named ``a``
    and locals are all bound to ``__UpperCAmelCase`` while later statements
    read other names (``vocab_tokens``, ``tokenizer``, ``tokens``,
    ``x_token`` …) — the bindings look machine-mangled and would raise
    NameError if executed; confirm against the upstream test file.
    """
    a : Optional[int] = GPTSanJapaneseTokenizer
    a : Optional[Any] = False
    a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
    def UpperCAmelCase ( self : Tuple ) -> Any:
        # Write a small vocab file and emoji mapping into tmpdir for the tests.
        super().setUp()
        # fmt: off
        __UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        __UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
        __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(__lowercase ) )
    def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
        # Factory: a tokenizer loaded from the temp vocab with the special-token map.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
        # Input/output pair: 㔺 normalizes to 世 on the output side.
        __UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        __UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text
    def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
        __UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        return text, ids
    def UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass # TODO add if relevant
    def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass # TODO add if relevant
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        pass # TODO add if relevant
    def UpperCAmelCase ( self : str ) -> Tuple:
        __UpperCAmelCase : List[str] = self.get_tokenizer()
        # Testing tokenization
        __UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
        __UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids without special tokens
        __UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids with special tokens
        __UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
        __UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    def UpperCAmelCase ( self : Tuple ) -> Dict:
        __UpperCAmelCase : int = self.get_tokenizer()
        # Testing tokenization: <|bagoftoken|> expands to repeated tokens on decode.
        __UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        __UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        __UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
        __UpperCAmelCase : int = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
    @slow
    def UpperCAmelCase ( self : int ) -> Optional[int]:
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization: text-only, prefix-only and prefix+text must decode identically.
        __UpperCAmelCase : List[Any] = """こんにちは、世界。"""
        __UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
        __UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
        __UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
        __UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        __UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
        __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Any = tokenizer.decode(__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
    @slow
    def UpperCAmelCase ( self : Any ) -> str:
        __UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization: token_type_ids mark the prefix segment.
        __UpperCAmelCase : int = """こんにちは、世界。"""
        __UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
        __UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
        __UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
        __UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
        __UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
        __UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        __UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
        __UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        __UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    @slow
    def UpperCAmelCase ( self : List[str] ) -> int:
        # Prefix placement changes ids but not the decoded surface string.
        __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
        __UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        __UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
    @slow
    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # Batch encoding: __call__ and batch_encode_plus must agree on ids/masks.
        __UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        __UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
        __UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
        # fmt: off
        __UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        __UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        __UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , __lowercase )
        self.assertListEqual(x_token.token_type_ids , __lowercase )
        self.assertListEqual(x_token.attention_mask , __lowercase )
        self.assertListEqual(x_token_a.input_ids , __lowercase )
        self.assertListEqual(x_token_a.token_type_ids , __lowercase )
        self.assertListEqual(x_token_a.attention_mask , __lowercase )
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def UpperCAmelCase ( self : Any ) -> int:
        # tokenizer has no padding token
        pass
| 63 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class a ( unittest.TestCase ):
    """Helper/fixture for DeformableDetr image-processor tests: stores the
    processing configuration and computes the expected resized (height, width)
    for given inputs.

    NOTE(review): ``__init__`` declares the same parameter name ``__lowercase``
    repeatedly (a SyntaxError as written) and every assignment binds
    ``__UpperCAmelCase`` rather than ``self.<attr>``, while later code reads
    ``self.size`` etc. — the bindings look machine-mangled; verify against the
    upstream test helper before relying on this class.
    """
    def __init__( self : str , __lowercase : Any , __lowercase : Optional[Any]=7 , __lowercase : List[str]=3 , __lowercase : str=30 , __lowercase : Tuple=400 , __lowercase : str=True , __lowercase : Dict=None , __lowercase : Dict=True , __lowercase : List[str]=[0.5, 0.5, 0.5] , __lowercase : Any=[0.5, 0.5, 0.5] , __lowercase : str=True , __lowercase : List[Any]=1 / 255 , __lowercase : Dict=True , ) -> Dict:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __UpperCAmelCase : Any = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        __UpperCAmelCase : Any = parent
        __UpperCAmelCase : List[Any] = batch_size
        __UpperCAmelCase : Optional[Any] = num_channels
        __UpperCAmelCase : List[Any] = min_resolution
        __UpperCAmelCase : List[Any] = max_resolution
        __UpperCAmelCase : Any = do_resize
        __UpperCAmelCase : Any = size
        __UpperCAmelCase : Dict = do_normalize
        __UpperCAmelCase : str = image_mean
        __UpperCAmelCase : Optional[int] = image_std
        __UpperCAmelCase : List[str] = do_rescale
        __UpperCAmelCase : Optional[int] = rescale_factor
        __UpperCAmelCase : Dict = do_pad
    def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Config dict in the shape expected by the image processor constructor.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def UpperCAmelCase ( self : Optional[int] , __lowercase : Any , __lowercase : Union[str, Any]=False ) -> int:
        # Expected post-resize dims: scale the shorter side to size["shortest_edge"],
        # keeping aspect ratio; batched mode takes the per-image maxima.
        if not batched:
            __UpperCAmelCase : List[str] = image_inputs[0]
            if isinstance(__lowercase , Image.Image ):
                __UpperCAmelCase , __UpperCAmelCase : Any = image.size
            else:
                __UpperCAmelCase , __UpperCAmelCase : int = image.shape[1], image.shape[2]
            if w < h:
                __UpperCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
                __UpperCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
            elif w > h:
                __UpperCAmelCase : Union[str, Any] = self.size["""shortest_edge"""]
                __UpperCAmelCase : Any = int(self.size["""shortest_edge"""] * w / h )
            else:
                __UpperCAmelCase : int = self.size["""shortest_edge"""]
                __UpperCAmelCase : str = self.size["""shortest_edge"""]
        else:
            __UpperCAmelCase : Union[str, Any] = []
            for image in image_inputs:
                __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __UpperCAmelCase : Optional[Any] = max(__lowercase , key=lambda __lowercase : item[0] )[0]
            __UpperCAmelCase : Optional[Any] = max(__lowercase , key=lambda __lowercase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a ( lowercase__ , unittest.TestCase ):
    """Test suite for DeformableDetrImageProcessor: property presence,
    dict round-trip, PIL/numpy/torch batching, and two ``@slow`` integration
    tests against COCO detection and panoptic annotation fixtures.

    NOTE(review): locals throughout are all bound to ``__UpperCAmelCase``
    while later statements read other names (``image_processing``,
    ``encoded_images``, ``encoding`` …) — the bindings look machine-mangled
    and would raise NameError if executed; verify against the upstream test.
    """
    a : Tuple = DeformableDetrImageProcessor if is_vision_available() else None
    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        __UpperCAmelCase : str = DeformableDetrImageProcessingTester(self )
    @property
    def UpperCAmelCase ( self : Tuple ) -> List[Any]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        # The processor must expose every configuration attribute.
        __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , """image_mean""" ) )
        self.assertTrue(hasattr(__lowercase , """image_std""" ) )
        self.assertTrue(hasattr(__lowercase , """do_normalize""" ) )
        self.assertTrue(hasattr(__lowercase , """do_resize""" ) )
        self.assertTrue(hasattr(__lowercase , """do_rescale""" ) )
        self.assertTrue(hasattr(__lowercase , """do_pad""" ) )
        self.assertTrue(hasattr(__lowercase , """size""" ) )
    def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
        # from_dict should honor both defaults and explicit size overrides.
        __UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
        self.assertEqual(image_processor.do_pad , __lowercase )
        __UpperCAmelCase : int = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowercase )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , __lowercase )
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        pass
    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        # Initialize image_processing
        __UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image )
        # Test not batched input
        __UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        __UpperCAmelCase : Optional[Any] = image_processing(__lowercase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        # Initialize image_processing
        __UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray )
        # Test not batched input
        __UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        __UpperCAmelCase , __UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCAmelCase : Union[str, Any] = image_processing(__lowercase , return_tensors="""pt""" ).pixel_values
        __UpperCAmelCase , __UpperCAmelCase : str = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        # Initialize image_processing
        __UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor )
        # Test not batched input
        __UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        __UpperCAmelCase , __UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCAmelCase : Optional[Any] = image_processing(__lowercase , return_tensors="""pt""" ).pixel_values
        __UpperCAmelCase , __UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # prepare image and target
        __UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
            __UpperCAmelCase : Optional[int] = json.loads(f.read() )
        __UpperCAmelCase : Union[str, Any] = {"""image_id""": 39769, """annotations""": target}
        # encode them
        __UpperCAmelCase : Dict = DeformableDetrImageProcessor()
        __UpperCAmelCase : List[Any] = image_processing(images=__lowercase , annotations=__lowercase , return_tensors="""pt""" )
        # verify pixel values
        __UpperCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , __lowercase )
        __UpperCAmelCase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowercase , atol=1e-4 ) )
        # verify area
        __UpperCAmelCase : Union[str, Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowercase ) )
        # verify boxes
        __UpperCAmelCase : Tuple = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowercase )
        __UpperCAmelCase : Optional[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowercase , atol=1e-3 ) )
        # verify image_id
        __UpperCAmelCase : List[str] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowercase ) )
        # verify is_crowd
        __UpperCAmelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowercase ) )
        # verify class_labels
        __UpperCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowercase ) )
        # verify orig_size
        __UpperCAmelCase : Any = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowercase ) )
        # verify size
        __UpperCAmelCase : List[Any] = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowercase ) )
    @slow
    def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        # prepare image, target and masks_path
        __UpperCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            __UpperCAmelCase : int = json.loads(f.read() )
        __UpperCAmelCase : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
        __UpperCAmelCase : Union[str, Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
        # encode them
        __UpperCAmelCase : List[Any] = DeformableDetrImageProcessor(format="""coco_panoptic""" )
        __UpperCAmelCase : Tuple = image_processing(images=__lowercase , annotations=__lowercase , masks_path=__lowercase , return_tensors="""pt""" )
        # verify pixel values
        __UpperCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , __lowercase )
        __UpperCAmelCase : Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowercase , atol=1e-4 ) )
        # verify area
        __UpperCAmelCase : Optional[Any] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowercase ) )
        # verify boxes
        __UpperCAmelCase : Dict = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowercase )
        __UpperCAmelCase : Tuple = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowercase , atol=1e-3 ) )
        # verify image_id
        __UpperCAmelCase : Union[str, Any] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowercase ) )
        # verify is_crowd
        __UpperCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowercase ) )
        # verify class_labels
        __UpperCAmelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowercase ) )
        # verify masks
        __UpperCAmelCase : Optional[Any] = 822873
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __lowercase )
        # verify orig_size
        __UpperCAmelCase : str = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowercase ) )
        # verify size
        __UpperCAmelCase : str = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowercase ) )
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
    """PyTorch-specific benchmark arguments extending BenchmarkArguments.

    Translates deprecated ``no_*`` flags into their positive counterparts,
    and exposes device/GPU/TPU selection helpers.

    NOTE(review): the three ``field(...)`` attributes below are all named
    ``a`` (each redefines the previous), and ``__init__`` reads ``kwargs`` /
    ``positive_arg`` that are never bound under these names — this looks
    machine-mangled; verify against the upstream PyTorchBenchmarkArguments.
    """
    a : Dict = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
        # Map each deprecated "no_X" kwarg onto attribute X (negated) and warn.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                __UpperCAmelCase : List[Any] = deprecated_arg[3:]
                setattr(self , __lowercase , not kwargs.pop(__lowercase ) )
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        __UpperCAmelCase : str = kwargs.pop("""torchscript""" , self.torchscript )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        __UpperCAmelCase : Optional[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
        super().__init__(**__lowercase )
    a : bool = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
    a : bool = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    a : str = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )
    @cached_property
    def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
        # Choose CPU, XLA/TPU, or CUDA; returns (device, visible GPU count).
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            __UpperCAmelCase : str = torch.device("""cpu""" )
            __UpperCAmelCase : int = 0
        elif is_torch_tpu_available():
            __UpperCAmelCase : Tuple = xm.xla_device()
            __UpperCAmelCase : int = 0
        else:
            __UpperCAmelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            __UpperCAmelCase : Optional[int] = torch.cuda.device_count()
        return device, n_gpu
    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> str:
        return is_torch_tpu_available() and self.tpu
    @property
    def UpperCAmelCase ( self : List[str] ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def UpperCAmelCase ( self : int ) -> "torch.device":
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]
    @property
    def UpperCAmelCase ( self : int ) -> List[Any]:
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]
    @property
    def UpperCAmelCase ( self : Tuple ) -> List[str]:
        return self.n_gpu > 0
| 63 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.