| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |

# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
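A minimal usage sketch (not part of the original file) showing what this config exposes; the import path assumes the class is exported from transformers as usual:

from transformers import MegatronBertConfig

config = MegatronBertConfig(num_hidden_layers=12)  # override one default
print(config.model_type)           # "megatron-bert"
print(config.hidden_size)          # 1024
print(config.num_hidden_layers)    # 12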
# ---------------------------------------------------------------------------
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
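For orientation, a minimal sketch of how these exports compose in practice (illustrative only; the model, optimizer, and data here are placeholders):

import torch
from accelerate import Accelerator

accelerator = Accelerator()  # picks up device/distributed settings from the environment
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

inputs = torch.randn(4, 8, device=accelerator.device)
loss = model(inputs).sum()
accelerator.backward(loss)  # use instead of loss.backward() so mixed-precision/distributed scaling apply
optimizer.step()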
# ---------------------------------------------------------------------------
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
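A quick sanity check on the shape arithmetic the tester asserts, using its defaults (a sketch, not part of the test file):

image_size = 64
downsampling_rates = [1, 4, 8, 16]
hidden_sizes = [16, 32, 64, 128]

# final hidden state of SegformerModel: downsampled by the last rate, then halved once more
expected_hw = image_size // (downsampling_rates[-1] * 2)
print((hidden_sizes[-1], expected_hw, expected_hw))   # (128, 2, 2)

# SegformerForSemanticSegmentation logits come out at 1/4 of the input resolution
print((image_size // 4, image_size // 4))             # (16, 16)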
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
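Outside the test harness, the same inference flow looks roughly like this (a sketch; the checkpoint is the one used in the slow tests above, the image path is a placeholder):

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor()
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

image = Image.open("cats.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# upsample the 1/4-resolution logits back to the image size and take the per-pixel argmax
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]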
# ---------------------------------------------------------------------------
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
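The pad_to_multiple_of logic above exists because fp16/bf16 matmuls are fastest when dimensions are multiples of 8 (16 for fp8). A worked example with hypothetical sequence lengths:

lengths = [37, 41, 53]
longest = max(lengths)   # 53
pad_to_multiple_of = 8   # the fp16/bf16 branch
padded = ((longest + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of
print(padded)            # 56: every sequence in the batch is padded to 56 tokens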
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
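A worked example of the gradient-accumulation fallback above (with MAX_GPU_BATCH_SIZE = 16 as defined earlier; numbers are illustrative):

requested_batch_size = 64
gradient_accumulation_steps = requested_batch_size // 16  # 4 micro-batches per optimizer step
micro_batch_size = 16
print(gradient_accumulation_steps, micro_batch_size * gradient_accumulation_steps)  # 4 64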
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
# ---------------------------------------------------------------------------
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def a_ ( lowerCamelCase ):
return getitem, k
def a_ ( lowerCamelCase , lowerCamelCase ):
return setitem, k, v
def a_ ( lowerCamelCase ):
return delitem, k
def a_ ( lowerCamelCase , lowerCamelCase , *lowerCamelCase ):
try:
return fun(lowerCamelCase , *lowerCamelCase ), None
except Exception as e:
return None, e
lowerCAmelCase__ : Union[str, Any] = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
lowerCAmelCase__ : Optional[Any] = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
lowerCAmelCase__ : Dict = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
lowerCAmelCase__ : Union[str, Any] = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
lowerCAmelCase__ : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase__ : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = HashMap(initial_block_size=4 )
UpperCAmelCase__ = {}
for _, (fun, *args) in enumerate(lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ = _run_operation(lowerCamelCase , lowerCamelCase , *lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = _run_operation(lowerCamelCase , lowerCamelCase , *lowerCamelCase )
assert my_res == py_res
assert str(lowerCamelCase ) == str(lowerCamelCase )
assert set(lowerCamelCase ) == set(lowerCamelCase )
assert len(lowerCamelCase ) == len(lowerCamelCase )
assert set(my.items() ) == set(py.items() )
def a_ ( ):
def is_public(lowerCamelCase ) -> bool:
return not name.startswith('_' )
UpperCAmelCase__ = {name for name in dir({} ) if is_public(lowerCamelCase )}
UpperCAmelCase__ = {name for name in dir(HashMap() ) if is_public(lowerCamelCase )}
assert dict_public_names > hash_public_names
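A short interactive sketch of the dict parity the parametrized test asserts (assuming the same HashMap import):

from data_structures.hashing.hash_map import HashMap

hm = HashMap(initial_block_size=4)
hm["key_a"] = "val_a"          # setitem, as in _set above
assert hm["key_a"] == "val_a"  # getitem, as in _get above
del hm["key_a"]                # delitem, as in _del above
assert len(hm) == 0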
# ---------------------------------------------------------------------------
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =int(lowercase )
# Initialize Result
SCREAMING_SNAKE_CASE_: str =[]
# Traverse through all denomination
for denomination in reversed(lowercase ):
# Find denominations
while int(lowercase ) >= int(lowercase ):
total_value -= int(lowercase )
answer.append(lowercase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
_UpperCAmelCase = []
_UpperCAmelCase = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
_UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
_UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
_UpperCAmelCase = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
_UpperCAmelCase = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f"""Following is minimal change for {value}: """)
_UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
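A worked example of the greedy routine with the default Indian denominations (note: greedy change-making is only guaranteed optimal for canonical coin systems such as this one):

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]  -> sums to 987 in 10 coins/notes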
# ---------------------------------------------------------------------------
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables,
        num_layers=config.num_layers,
        is_encoder_only=is_encoder_only,
        scalable_attention=scalable_attention,
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
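A hypothetical invocation, using the flags defined above (the script name and all paths are placeholders):

# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model \
#     --scalable_attention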
# ---------------------------------------------------------------------------
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
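A minimal usage sketch (the vocab path is a placeholder; ESM vocab files are one token per line, and _tokenize simply splits on whitespace):

tokenizer = EsmTokenizer(vocab_file="vocab.txt")
tokens = tokenizer.tokenize("M K T A Y")                # -> ["M", "K", "T", "A", "Y"]
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.build_inputs_with_special_tokens(ids))  # [cls_id, *ids, eos_id]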
# ---------------------------------------------------------------------------
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Dict = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
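The effect of the _LazyModule indirection above is that heavy submodules are imported only on first attribute access; from user code the import looks ordinary:

from transformers import MobileBertConfig, MobileBertModel  # resolved lazily

config = MobileBertConfig()      # triggers the real configuration_mobilebert import
model = MobileBertModel(config)  # triggers the real modeling_mobilebert import (requires torch)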
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a : Union[str, Any] = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Dict = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=_lowercase , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=_lowercase , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=_lowercase , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=_lowercase , default="data/dump" , help="The dump file prefix." )
a : Dict = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
a : Optional[Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
a : str = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
a : List[str] = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
a : Tuple = RobertaTokenizer.from_pretrained(args.tokenizer_name )
a : Union[str, Any] = tokenizer.special_tokens_map["cls_token"] # `<s>`
a : str = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
a : List[Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
a : Optional[int] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
a : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
a : List[Any] = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(_lowercase )} examples to process.""" )
a : Optional[Any] = []
a : Optional[Any] = 0
a : int = 1_0000
a : Dict = time.time()
for text in data:
a : List[Any] = F"""{bos} {text.strip()} {sep}"""
a : Optional[int] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
rslt.append(_lowercase )
iter += 1
if iter % interval == 0:
a : Optional[Any] = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
a : Optional[Any] = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(_lowercase )} examples processed.""" )
a : Optional[int] = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
a : Tuple = tokenizer.vocab_size
if vocab_size < (1 << 16):
a : Optional[int] = [np.uintaa(_lowercase ) for d in rslt]
else:
a : Optional[Any] = [np.intaa(_lowercase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(_lowercase , "wb" ) as handle:
pickle.dump(rslt_ , _lowercase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
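Reading the dump back is the mirror image (a sketch; the file name follows the naming scheme above):

import pickle

with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
    sequences = pickle.load(handle)  # list of np.uint16 / np.int32 arrays of token ids
print(len(sequences), sequences[0][:10])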
# ---------------------------------------------------------------------------
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __snake_case ( a ):
UpperCAmelCase__ : Union[List[PIL.Image.Image], np.ndarray]
UpperCAmelCase__ : Optional[List[bool]]
UpperCAmelCase__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
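For orientation, loading the first-stage pipeline exported here typically looks like this (a sketch; checkpoint name, dtype, and prompt are illustrative):

import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
image = pipe("a photo of a corgi wearing a party hat").images[0]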
# ---------------------------------------------------------------------------
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
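Outside the test suite the pipeline is driven the same way (a sketch reusing the checkpoint from the slow test; the image path is a placeholder):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
print(vqa(image="cats.png", question="How many cats are there?", top_k=2))
# e.g. [{"score": 0.88, "answer": "2"}, {"score": 0.30, "answer": "1"}]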
# ---------------------------------------------------------------------------
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
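The checkpoint regex can be exercised in isolation (a sketch with a made-up docstring fragment):

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "... the [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture ..."
print(_re_checkpoint.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]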
# ---------------------------------------------------------------------------
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : jnp.ndarray
UpperCamelCase__ : jnp.ndarray
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
UpperCamelCase__ : int
UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
UpperCamelCase__ : jnp.dtype = jnp.floataa
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels ) - 1 ):
__SCREAMING_SNAKE_CASE = self.block_out_channels[i]
__SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = blocks
__SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
for block in self.blocks:
__SCREAMING_SNAKE_CASE = block(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
__SCREAMING_SNAKE_CASE = self.conv_out(_A )
return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : int = 32
UpperCamelCase__ : int = 4
UpperCamelCase__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase__ : Union[bool, Tuple[bool]] = False
UpperCamelCase__ : Tuple[int] = (320, 640, 1280, 1280)
UpperCamelCase__ : int = 2
UpperCamelCase__ : Union[int, Tuple[int]] = 8
UpperCamelCase__ : Optional[Union[int, Tuple[int]]] = None
UpperCamelCase__ : int = 1280
UpperCamelCase__ : float = 0.0
UpperCamelCase__ : bool = False
UpperCamelCase__ : jnp.dtype = jnp.floataa
UpperCamelCase__ : bool = True
UpperCamelCase__ : int = 0
UpperCamelCase__ : str = "rgb"
UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa )
__SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = jax.random.split(_A )
__SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels
        )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype
                )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
                )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
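# Usage (a minimal sketch, assuming the classes above; names below are illustrative):
#   rng = jax.random.PRNGKey(0)
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(rng)  # random-init params for the input shapes declared above
#   out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)
#   # out.down_block_res_samples / out.mid_block_res_sample feed the matching UNet blocks.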
| 257
| 1
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
a = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
a = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
a = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    """Word error rate metric, computed with jiwer."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
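# Hand-worked check of the WER formula for the docstring example above:
#   "this is the reference"  vs  "this is the prediction"  -> S=1, D=0, I=0, N=4
#   "there is another one"   vs  "there is an other sample" -> S=2, D=0, I=1, N=4
#   WER = (S + D + I) / N = (3 + 0 + 1) / 8 = 0.5, matching the printed 0.5.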
| 271
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    """Parse a boolean-ish CLI string into a bool."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config) -> dict:
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
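# Example invocation (a sketch; the checkpoint file name must encode the model type, as the branches above show):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True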
| 271
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """Configuration for the Decision Transformer (a GPT-2-style backbone over RL trajectories)."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
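# Example (a sketch): a config for an 11-dim-state, 3-dim-action agent, keeping the GPT-2 defaults above.
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)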
| 23
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Last `digits` digits of the hyperexponentiation (power tower) of `base` of height `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
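# Sanity check, hand-computable: _modexpt(2, 10, 1000) == 24, since 2**10 = 1024 and 1024 % 1000 = 24.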
if __name__ == "__main__":
print(f"""{solution() = }""")
| 110
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 141
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    """Pipeline that transcribes speech with Whisper and feeds the text to Stable Diffusion."""

    def __init__(self, speech_model, speech_processor, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
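# Usage (a sketch; the model id and custom-pipeline loading mechanism are assumptions and vary by diffusers version):
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion")
#   image = pipe(audio_array, sampling_rate=16_000).images[0]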
| 141
| 1
|
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Length of a circular arc subtending `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)
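# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079.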
if __name__ == "__main__":
print(arc_length(90, 10))
| 184
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
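# Worked example of the formula f = 1 / (2 * pi * sqrt(L * C)): with L = 10 mH and C = 10 uF,
# L * C = 1e-7, sqrt(1e-7) ≈ 3.1623e-4, so f ≈ 1 / (2 * pi * 3.1623e-4) ≈ 503.3 Hz.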
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the image processor is expected to resize to."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 364
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_directory = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_directory)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 209
| 0
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
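# e.g. solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29.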
if __name__ == "__main__":
print(F"""{solution() = }""")
| 291
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Cast every tensor in a saved state dict to fp16, overwriting src_path unless save_path is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
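# CLI usage via fire (a sketch, assuming this file is saved as fp16_convert.py):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin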
| 7
| 0
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
__UpperCamelCase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
__UpperCamelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 366
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
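# Rolling-hash update illustrated (a hand sketch of the line above, base alphabet_size = 256):
#   hash("bc") = ((hash("ab") - ord("a") * modulus_power) * 256 + ord("c")) % modulus
# i.e. remove the leading character's contribution, shift one base position, and append
# the new trailing character, so each window of `text` is hashed in O(1) after the first.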
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 312
| 0
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Neville's iterated interpolation: returns [interpolated value at x0, full table q]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
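# e.g. neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0
# (the points lie on the line y = x + 5, so the interpolant at x0 = 5 is exactly 10).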
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Neville's iterated interpolation: returns [interpolated value at x0, full table q]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )
@apply_forward_hook
def UpperCAmelCase(self : int , _A : torch.FloatTensor , _A : bool = True ) -> VQEncoderOutput:
snake_case = self.encoder(_A )
snake_case = self.quant_conv(_A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_A )
@apply_forward_hook
def UpperCAmelCase(self : Union[str, Any] , _A : torch.FloatTensor , _A : bool = False , _A : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(_A )
else:
snake_case = h
snake_case = self.post_quant_conv(_A )
snake_case = self.decoder(_A , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
def UpperCAmelCase(self : Optional[Any] , _A : torch.FloatTensor , _A : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(_A ).latents
snake_case = self.decode(_A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
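
# A minimal round-trip sketch (an illustration, not part of the original module):
# with the default config the decoder restores the input resolution, so a random
# 32x32 RGB batch should come back with the same shape (random weights, CPU).
if __name__ == "__main__":
    model = VQModel()
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        reconstruction = model(x).sample
    print(reconstruction.shape)  # expected: torch.Size([1, 3, 32, 32])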
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of ``nums[left:right + 1]`` by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
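
# A minimal usage sketch; the recursion T(n) = 2 T(n/2) + O(1) visits every
# element exactly once, so the divide-and-conquer search is O(n) overall.
if __name__ == "__main__":
    print(find_max([3, 7, 1, 9, 4], 0, 4))  # 9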
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                }), )

    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = FlaxAutoModel.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack', ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl"):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path)
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt"):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=False, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=False, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=False, help="The output directory where the final fine-tuned model is stored.", )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True)

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )


if __name__ == "__main__":
    main()
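
# End-to-end flow, as wired up in main() above: generate_n_pairs() collects
# (context, information-gain) pairs, training_secondary_learner() fits the IGF
# filter on them, and finetune() uses that filter to keep only informative
# contexts while fine-tuning GPT-2. A hypothetical invocation, assuming this
# script is saved as run_igf.py next to the WikiText .jbl files it expects:
#
#     python run_igf.py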
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained('microsoft/deberta-v2-xlarge')

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"""{output[:, 1:4, 1:4]}""")
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares of the first ``n`` natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
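    # Hand check for n = 10 (a quick illustration): (1 + ... + 10)**2 = 55**2 = 3025,
    # 1**2 + ... + 10**2 = 385, and 3025 - 385 = 2640.
    assert solution(10) == 2640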
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens', [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(F'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
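
# A minimal translation sketch (standard usage for the checkpoints referenced
# above; downloads model weights, so it needs network access):
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))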
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overridden to also save the Q-Former tokenizer, in its own subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overridden to also load the Q-Former tokenizer from its subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
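
# A minimal usage sketch (illustrative; "Salesforce/instructblip-vicuna-7b" is a
# published checkpoint, and "example.jpg" stands in for any local image):
if __name__ == "__main__":
    from PIL import Image

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
    image = Image.open("example.jpg")
    inputs = processor(images=image, text="What is shown in this picture?", return_tensors="pt")
    # pixel_values for the vision tower, input_ids/attention_mask for the language
    # model, and qformer_input_ids/qformer_attention_mask for the Q-Former
    print(list(inputs.keys()))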
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Output is 0 only when both inputs are 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
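
# NAND is functionally complete; a small sketch (not in the original file) of
# other gates derived from it:
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))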
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    # build a tied linear projection from an embedding matrix
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
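# Example invocation (the script filename and all paths below are
# illustrative placeholders, not shipped with this file):
#   python <this_script>.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./converted_model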
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 316
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve a linear system Ax = b with the Jacobi iteration method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that the coefficient part of the augmented table is strictly diagonally dominant."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
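# A minimal usage sketch with made-up numbers (not part of the original test
# suite): a strictly diagonally dominant 3x3 system solved in a few sweeps.
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3)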
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
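# Minimal usage sketch (the checkpoint id is illustrative and assumes network
# access; `image` is any PIL image):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat", "a dog"], images=image, return_tensors="pt")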
| 350
|
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a yes/no environment flag, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase with a shared temporary directory that is emptied before each test."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 149
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
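# Minimal usage sketch (assumes the standard checkpoint is reachable):
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   tokenizer.tokenize("Hello, world!")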
| 137
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : int = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
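# Example invocation (the script filename is a placeholder; the repo id is the
# one suggested by the original help text):
#   python <this_script>.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./converted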
| 137
| 1
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 358
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU cache storing a given capacity of items, backed by the list above."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
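# A small usage sketch of the class-level decorator above:
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#   fib(30)
#   fib.cache_info()  # -> CacheInfo(hits=..., misses=..., capacity=100, current size=...)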
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346
| 0
|
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
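# A minimal usage sketch with a made-up grid (1 marks land, 0 marks water,
# islands are 8-connected):
#   grid = [[1, 1, 0], [0, 0, 0], [0, 0, 1]]
#   Graph(3, 3, grid).count_islands()  # -> 2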
| 181
|
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
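# Examples of the behaviour above:
#   get_set_bits_count(25)  # -> 3, since bin(25) == "0b11001"
#   get_set_bits_count(0)   # -> 0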
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181
| 1
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =single_qubit_measure(2, 2)
print(F"Total count for various states are: {counts}")
| 321
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = True
lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,)
lowercase_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
# first forward pass
lowercase_ : str = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,)
lowercase_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase_ : int = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
lowercase_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
# select random slice
lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoderTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs()
lowercase_ : Optional[int] = 'bert'
self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Tuple = model(__UpperCamelCase )[0]
lowercase_ : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
| 321
| 1
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
a = int(input('Enter the last number (nth term) of the P-Series'))
a = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 155
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 364
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
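# Minimal usage sketch (the path is a placeholder for a local text file):
#   dataset = TextDatasetReader("corpus.txt").read()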
| 342
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
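# With _import_structure wired up, names resolve lazily on first attribute access, e.g.:
#
#   from transformers.models.deit import DeiTConfig  # submodule is imported only at this point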
| 19
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: int = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(__lowerCamelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 149
| 0
|
"""simple docstring"""
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701
a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
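# Sanity check: Python's built-in three-argument pow() computes the same modular
# exponentiation in O(log n), so both approaches to the Fermat inverse must agree:
print(binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p))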
| 350
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
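# Quick check of the derived properties under the defaults above (illustrative):
#
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length   # 24000 samples (1.0 s at 24 kHz)
#   config.chunk_stride   # max(1, int(0.99 * 24000)) == 23760
#   config.frame_rate     # ceil(24000 / (8 * 5 * 4 * 2)) == 75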
| 258
| 0
|
def euclidean_gcd(a, b):
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a, b):
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
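    # Illustrative cross-check: both variants agree with the standard library.
    import math

    assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == math.gcd(48, 18) == 6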
| 327
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 327
| 1
|
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)
        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))
        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())
        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device)
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)
        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")
        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 345
|
'''simple docstring'''
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 345
| 1
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
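    # Deterministic outcome: with X applied to both qubits, every shot measures '11'.
    assert set(counts) == {"11"}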
| 321
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 321
| 1
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
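# Illustrative call on toy arrays (MRPC reports accuracy, F1 and their mean):
#
#   import numpy as np
#   glue_compute_metrics("mrpc", np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1]))
#   # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}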
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 215
| 0
|
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def decimal_to_hexadecimal(decimal) -> str:
    """Convert a (possibly negative) decimal value to its hexadecimal string form."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
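    # Illustrative agreement with the built-in hex() (values chosen to avoid the 0 edge case):
    for n in (35, -256, 4096):
        assert decimal_to_hexadecimal(n) == hex(n)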
| 14
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
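# Hypothetical invocation through the agents/tools API (checkpoint download required):
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous?", src_lang="French", tgt_lang="English")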
| 342
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
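# Illustrative round trip (fetches the checkpoint named in the map above):
#
#   tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tok("hello world").input_ids
#   tok.decode(ids)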
| 358
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 6
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
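# Illustrative: the defaults mirror BERT-base, plus the pronunciation/shape channels.
#
#   config = RoCBertConfig(num_hidden_layers=6)
#   (config.model_type, config.hidden_size, config.num_hidden_layers)  # ("roc_bert", 768, 6)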
| 103
|
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
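    # Approximate coordinates, for illustration: San Francisco -> Yosemite is roughly 254 km.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.0f} km")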
| 258
| 0
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
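# (Added note) The mixin-based test class below consumes this tester:
# `prepare_config_and_inputs_for_common` builds one dummy batch that is fed to
# every architecture listed in `all_model_classes`.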
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
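# (Added) Assumed invocation for just this file's tests -- the path follows the
# usual transformers layout and is a guess, not taken from this document:
#   python -m pytest tests/models/roformer/test_modeling_flax_roformer.py -q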
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
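# (Added note) The XPath grabs the three "maincounter-number" widgets in page
# order (cases, deaths, recovered); if the site layout changes, the namedtuple
# fields silently mis-align. A cheap guard, sketched under that assumption:
#   values = html.fromstring(requests.get(url).content).xpath(xpath_str)
#   assert len(values) == 3, "worldometers layout changed"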
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,  # a random subset is fine for calibration (the original flag was garbled)
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
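# Added usage sketch (argument plumbing is assumed, not shown in this file):
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_dataset,
#       eval_dataset=eval_dataset, eval_examples=eval_examples,
#       post_process_function=post_processing_function, quant_trainer_args=quant_args,
#   )
#   trainer.calibrate()          # collect activation ranges on the train set
#   trainer.save_onnx("./out")   # export the fake-quantized graph to ONNX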
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
                 hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
                 qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
                 hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
                 layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
                 encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
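# (Added note) The test classes below consume this tester. FocalNetBackbone is
# deliberately last in `all_model_classes`: the `[:-1]` slices in the embedding
# and hidden-state tests skip it, since a backbone has no input embeddings.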
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertTrue(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
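# Added usage note: with the _LazyModule indirection above, importing a symbol
# only pulls in the torch-backed module on first attribute access, e.g.:
#   from transformers.models.mra import MraConfig   # cheap, no torch needed
#   config = MraConfig()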
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
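    # Added usage sketch (values are illustrative):
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True: 4 + 5 == 9
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False: no subset reaches 30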
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCamelCase_ =self.get_dummy_dataset()
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_legacy_index_retriever()
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ =[[5, 7], [10, 11]]
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever(lowerCAmelCase, lowerCAmelCase, prefix=retriever.config.generator.prefix, n_docs=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, np.ndarray )
lowerCamelCase_ =retriever(
lowerCAmelCase, lowerCAmelCase, prefix=retriever.config.generator.prefix, n_docs=lowerCAmelCase, return_tensors='''pt''', )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
retriever.set_ctx_encoder_tokenizer(lowerCAmelCase )
lowerCamelCase_ =[[5, 7], [10, 11]]
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever(lowerCAmelCase, lowerCAmelCase, prefix=retriever.config.generator.prefix, n_docs=lowerCAmelCase )
self.assertEqual(
len(lowerCAmelCase ), 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ), lowerCAmelCase ) # check for doc token related keys in dictionary.
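    # (Added note) All retriever flavors above -- canonical HF index, custom index
    # (in-memory and from disk), and the legacy pickle/faiss layout -- share the same
    # two-document dummy corpus, so `doc_ids == [[1], [0]]` simply means "nearest
    # document by inner product" for each of the two query embeddings.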
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
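# Added example invocation (the script filename is assumed, following the usual
# transformers conversion-script naming; checkpoint names come from ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visualbert-vqa-pre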
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
A_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
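# A hypothetical invocation of the converter above (added for illustration; the
# checkpoint, dictionary, and output paths are placeholders, not files shipped
# with this script):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_fairseq.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted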
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="""max_length""" , return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_double_precision_pad( self ):
        '''simple docstring'''
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def test_zero_mean_unit_var_normalization( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
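# A minimal usage sketch of the feature extractor exercised by the tests above
# (added; the synthetic sine input and the default settings -- 80 mel bins and
# a 30-second / 3000-frame window -- are assumptions, not values from the tests):
def _whisper_feature_extraction_demo():
    audio = np.sin(2 * np.pi * 440 * np.arange(16000 ) / 16000 ).astype(np.float32 )
    feature_extractor = WhisperFeatureExtractor()
    # One second of audio is padded to the 30-second window before the log-mel
    # spectrogram is computed, so the output spans the full 3000 frames.
    features = feature_extractor(audio , sampling_rate=16000 , return_tensors="""np""" ).input_features
    assert features.shape == (1, 80, 3000)  # (batch, mel bins, frames)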
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
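# Worked trace for main() above (added illustration): with leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] and height log2(8) = 3, the maximizer
# moves first.
#   depth 2 (max over leaf pairs): 90, 33, 65, 34423
#   depth 1 (min):                 min(90, 33) = 33, min(65, 34423) = 65
#   depth 0 (max):                 max(33, 65) = 65   -> "Optimal value : 65"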
def neville_interpolate( x_points , y_points , xa ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
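# Added sanity check (not part of the original module): four points sampled
# from the line y = 2x are interpolated exactly, so the value at x = 5 is 10.
assert neville_interpolate([1, 2, 3, 4] , [2, 4, 6, 8] , 5 )[0] == 10.0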
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    '''simple docstring'''
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    '''simple docstring'''
    weight_q , weight_k , weight_v = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
    new_checkpoint[f'{new_prefix}.group_norm.weight'] = checkpoint[f'{old_prefix}.norm.weight']
    new_checkpoint[f'{new_prefix}.group_norm.bias'] = checkpoint[f'{old_prefix}.norm.bias']
    new_checkpoint[f'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'{new_prefix}.to_out.0.weight'] = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f'{new_prefix}.to_out.0.bias'] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser( unet_path , unet_config ):
    '''simple docstring'''
    checkpoint = torch.load(unet_path , map_location="""cpu""" )
    new_checkpoint = {}
lowerCAmelCase_ : Optional[Any] = checkpoint["""time_embed.0.weight"""]
lowerCAmelCase_ : List[str] = checkpoint["""time_embed.0.bias"""]
lowerCAmelCase_ : Union[str, Any] = checkpoint["""time_embed.2.weight"""]
lowerCAmelCase_ : Dict = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase_ : List[Any] = checkpoint["""label_emb.weight"""]
lowerCAmelCase_ : List[str] = checkpoint["""input_blocks.0.0.weight"""]
lowerCAmelCase_ : Any = checkpoint["""input_blocks.0.0.bias"""]
lowerCAmelCase_ : int = unet_config["""down_block_types"""]
lowerCAmelCase_ : List[Any] = unet_config["""layers_per_block"""]
lowerCAmelCase_ : int = unet_config["""attention_head_dim"""]
lowerCAmelCase_ : Dict = unet_config["""block_out_channels"""]
lowerCAmelCase_ : Optional[Any] = 1
lowerCAmelCase_ : Tuple = channels_list[0]
for i, layer_type in enumerate(A__ ):
lowerCAmelCase_ : Optional[int] = channels_list[i]
lowerCAmelCase_ : Union[str, Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A__ ):
lowerCAmelCase_ : List[Any] = f'down_blocks.{i}.resnets.{j}'
lowerCAmelCase_ : Union[str, Any] = f'input_blocks.{current_layer}.0'
lowerCAmelCase_ : Union[str, Any] = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ : Dict = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A__ ):
lowerCAmelCase_ : Dict = f'down_blocks.{i}.resnets.{j}'
lowerCAmelCase_ : Tuple = f'input_blocks.{current_layer}.0'
lowerCAmelCase_ : str = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ : List[str] = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
lowerCAmelCase_ : Union[str, Any] = f'down_blocks.{i}.attentions.{j}'
lowerCAmelCase_ : List[str] = f'input_blocks.{current_layer}.1'
lowerCAmelCase_ : Optional[int] = convert_attention(
A__ , A__ , A__ , A__ , A__ )
current_layer += 1
if i != len(A__ ) - 1:
lowerCAmelCase_ : Union[str, Any] = f'down_blocks.{i}.downsamplers.0'
lowerCAmelCase_ : str = f'input_blocks.{current_layer}.0'
lowerCAmelCase_ : Optional[int] = convert_resnet(A__ , A__ , A__ , A__ )
current_layer += 1
lowerCAmelCase_ : List[str] = current_channels
# hardcoded the mid-block for now
lowerCAmelCase_ : str = """mid_block.resnets.0"""
lowerCAmelCase_ : Optional[Any] = """middle_block.0"""
lowerCAmelCase_ : int = convert_resnet(A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Dict = """mid_block.attentions.0"""
lowerCAmelCase_ : str = """middle_block.1"""
lowerCAmelCase_ : List[Any] = convert_attention(A__ , A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Optional[int] = """mid_block.resnets.1"""
lowerCAmelCase_ : Tuple = """middle_block.2"""
lowerCAmelCase_ : Tuple = convert_resnet(A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : Tuple = unet_config["""up_block_types"""]
for i, layer_type in enumerate(A__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ : Optional[Any] = f'up_blocks.{i}.resnets.{j}'
lowerCAmelCase_ : str = f'output_blocks.{current_layer}.0'
lowerCAmelCase_ : Dict = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
current_layer += 1
if i != len(A__ ) - 1:
lowerCAmelCase_ : Any = f'up_blocks.{i}.upsamplers.0'
lowerCAmelCase_ : Dict = f'output_blocks.{current_layer-1}.1'
lowerCAmelCase_ : List[Any] = convert_resnet(A__ , A__ , A__ , A__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ : str = f'up_blocks.{i}.resnets.{j}'
lowerCAmelCase_ : Optional[int] = f'output_blocks.{current_layer}.0'
lowerCAmelCase_ : Any = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
lowerCAmelCase_ : Tuple = f'up_blocks.{i}.attentions.{j}'
lowerCAmelCase_ : Dict = f'output_blocks.{current_layer}.1'
lowerCAmelCase_ : Union[str, Any] = convert_attention(
A__ , A__ , A__ , A__ , A__ )
current_layer += 1
if i != len(A__ ) - 1:
lowerCAmelCase_ : Optional[int] = f'up_blocks.{i}.upsamplers.0'
lowerCAmelCase_ : Tuple = f'output_blocks.{current_layer-1}.2'
lowerCAmelCase_ : str = convert_resnet(A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Union[str, Any] = checkpoint["""out.0.weight"""]
lowerCAmelCase_ : Any = checkpoint["""out.0.bias"""]
lowerCAmelCase_ : str = checkpoint["""out.2.weight"""]
lowerCAmelCase_ : List[Any] = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
__A : Optional[int] = parser.parse_args()
__A : List[Any] = strabool(args.class_cond)
__A : str = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__A : List[str] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__A : int = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__A : Union[str, Any] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__A : List[str] = None
__A : Dict = con_pt_to_diffuser(args.unet_path, unet_config)
__A : Tuple = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__A : int = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__A : Union[str, Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__A : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__A : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__A : Tuple = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
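# A hypothetical end-to-end invocation of this converter (added; the checkpoint
# file name is a placeholder -- the "imagenet64" substring in the name is what
# selects the matching UNet and scheduler configs above):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-converted \
#       --class_cond True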
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput ):
    """simple docstring"""

    sample: torch.FloatTensor
class UNetaDModel(ModelMixin , ConfigMixin ):
    """simple docstring"""
@register_to_config
    def __init__( self : Optional[int] , sample_size : int = 65536 , sample_rate : Optional[int] = None , in_channels : int = 2 , out_channels : int = 2 , extra_in_channels : int = 0 , time_embedding_type : str = "fourier" , flip_sin_to_cos : bool = True , use_timestep_embedding : bool = False , freq_shift : float = 0.0 , down_block_types : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type : Tuple[str] = "UNetMidBlock1D" , out_block_type : str = None , block_out_channels : Tuple[int] = (32, 32, 64) , act_fn : str = None , norm_num_groups : int = 8 , layers_per_block : int = 1 , downsample_each_block : bool = False , ) -> List[Any]:
super().__init__()
lowerCAmelCase_ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
lowerCAmelCase_ : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
lowerCAmelCase_ : List[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase_ : Optional[int] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase_ : List[str] = block_out_channels[0] * 4
lowerCAmelCase_ : List[str] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
lowerCAmelCase_ : str = nn.ModuleList([] )
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : str = nn.ModuleList([] )
lowerCAmelCase_ : List[str] = None
# down
lowerCAmelCase_ : List[str] = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
lowerCAmelCase_ : Optional[Any] = output_channel
lowerCAmelCase_ : str = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase_ : int = i == len(lowerCamelCase ) - 1
lowerCAmelCase_ : List[str] = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
lowerCAmelCase_ : Dict = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
lowerCAmelCase_ : List[str] = list(reversed(lowerCamelCase ) )
lowerCAmelCase_ : str = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase_ : Optional[int] = out_channels
else:
lowerCAmelCase_ : Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
lowerCAmelCase_ : Any = output_channel
lowerCAmelCase_ : str = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
lowerCAmelCase_ : int = i == len(lowerCamelCase ) - 1
lowerCAmelCase_ : Any = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = output_channel
# out
lowerCAmelCase_ : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase_ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self : List[Any] , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , return_dict : bool = True , ) -> Union[UNetaDOutput, Tuple]:
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
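# Standalone illustration (added) of the timestep-embedding broadcast performed
# in the forward pass above, using plain tensors with made-up sizes:
def _timestep_broadcast_demo():
    sample = torch.zeros(2 , 14 , 16 )             # (batch, channels, length)
    timestep_embed = torch.randn(2 , 32 )          # (batch, embed_dim)
    timestep_embed = timestep_embed[..., None]     # -> (batch, embed_dim, 1)
    timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] )
    assert timestep_embed.shape == (2, 32, 16)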
'''simple docstring'''
import cmath
import math
def apparent_power( voltage , current , voltage_angle , current_angle ):
    voltage_angle = math.radians(voltage_angle )
    current_angle = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle )
    current_rect = cmath.rect(current , current_angle )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
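# Worked example for apparent_power above (added; the values are illustrative):
# a 100 V source at +30 degrees driving 5 A at -30 degrees gives 500 VA on the
# positive real axis, since the phase angles cancel in the complex product.
assert cmath.isclose(apparent_power(100 , 5 , 30 , -30 ) , 500 + 0j , abs_tol=1e-9 )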
from functools import reduce
UpperCAmelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution( n : str = N ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
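# A smaller instance of the same sliding-window product idea (added helper, not
# part of the original solution): over "12345" with window width 2 the best
# product is 4 * 5 = 20.
def _window_product_demo( digits : str = "12345" , width : int = 2 ) -> int:
    # Scan every contiguous window of `width` digits and keep the largest product.
    return max(
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , digits[i : i + width] ) )
        for i in range(len(digits ) - width + 1 ) )


assert _window_product_demo() == 20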
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ):
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
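# Tiny standalone illustration (added) of the BGR<->RGB flip performed by
# flip_channel_order above: for a channels-first array it reverses axis 0.
def _flip_channel_order_demo():
    image = np.stack([np.full((2, 2) , 0 ), np.full((2, 2) , 1 ), np.full((2, 2) , 2 )] )  # (C, H, W)
    flipped = image[::-1, ...]  # same effect as flip_channel_order for channels-first input
    assert flipped[0, 0, 0] == 2 and flipped[2, 0, 0] == 0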
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("""KEY""")
VAL = TypeVar("""VAL""")
@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    """simple docstring"""

    key: KEY
    val: VAL


class _DeletedItem(_Item ):
    """simple docstring"""

    def __init__( self ):
        super().__init__(None , None )

    def __bool__( self ):
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL] ):
    """simple docstring"""

    def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key : KEY ):
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind : int ):
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind : int , key : KEY , val : VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size : int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key : KEY ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key : KEY , val : VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key : KEY , val : VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ):
        return self._len

    def __iter__( self ):
        yield from (item.key for item in self._buckets if item )

    def __repr__( self ):
        val_string = """ ,""".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
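# A short usage sketch of the hash map above (added; not part of the original
# module): setting, overwriting, and deleting keys mirrors the built-in dict.
_demo_map = HashMap()
_demo_map["answer"] = 42   # stored via _add_item, growing the table on demand
_demo_map["answer"] = 43   # same key: overwritten in place, length unchanged
assert _demo_map["answer"] == 43 and len(_demo_map ) == 1
del _demo_map["answer"]    # the slot becomes the _deleted tombstone
assert len(_demo_map ) == 0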
'''simple docstring'''
def solution( limit : int = 1_000_000 ) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
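# Derivation behind the loop above (added for clarity): write the decreasing
# arithmetic progression as x = a + d, y = a, z = a - d.  Then
#     n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a),
# so for a candidate divisor a of n the difference d = (a + n / a) / 4 must be
# a whole number (the `% 4` test), with a > d so that z > 0 and a < 4 * d so
# that n > 0.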
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
__lowerCamelCase : Union[str, Any] = KandinskyVaaControlnetPipeline
__lowerCamelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "hint"]
__lowerCamelCase : Dict = ["image_embeds", "negative_image_embeds", "hint"]
__lowerCamelCase : List[str] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowerCamelCase : Dict = False
@property
def _lowerCAmelCase ( self ):
return 32
@property
def _lowerCAmelCase ( self ):
return 32
@property
def _lowerCAmelCase ( self ):
return self.time_input_dim
@property
def _lowerCAmelCase ( self ):
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ):
return 100
@property
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : Any = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A : List[str] = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def _lowerCAmelCase ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.dummy_unet
A : Tuple = self.dummy_movq
A : List[Any] = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="""linear""", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="""epsilon""", thresholding=False, )
A : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs( self, device, seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
A : List[str] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _lowerCAmelCase ( self ):
A : Dict = """cpu"""
A : List[str] = self.get_dummy_components()
A : Dict = self.pipeline_class(**lowerCamelCase__ )
A : Optional[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : int = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
A : Union[str, Any] = output.images
A : str = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ), return_dict=lowerCamelCase__, )[0]
A : Optional[int] = image[0, -3:, -3:, -1]
A : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        A : Dict = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
A : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A : Optional[Any] = torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
A : List[str] = hint.permute(2, 0, 1 ).unsqueeze(0 )
A : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
A : Tuple = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.floataa )
A : Union[str, Any] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
A : Optional[Any] = """A robot, 4k photo"""
A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
A : int = pipeline(
image_embeds=lowerCamelCase__, negative_image_embeds=lowerCamelCase__, hint=lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=100, output_type="""np""", )
A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__, lowerCamelCase__ )
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool ):
'''simple docstring'''
UpperCAmelCase_ : Dict = """naver-clova-ix/donut-base-finetuned-docvqa"""
UpperCAmelCase_ : str = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
UpperCAmelCase_ : Any = """document_qa"""
UpperCAmelCase_ : Tuple = AutoProcessor
UpperCAmelCase_ : Any = VisionEncoderDecoderModel
UpperCAmelCase_ : List[Any] = ["""image""", """text"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )
    def encode( self , document : "Image" , question : str ):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowercase_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowercase_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowercase_ , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
return sequence["answer"]
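# A hypothetical usage sketch of the tool above (added; the image path is a
# placeholder, and the first call downloads the Donut checkpoint):
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   document = Image.open("invoice.png")
#   answer = tool(document, "What is the purchase amount?")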
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
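
# What the lazy structure above buys (a sketch; assumes this file is the package's
# `transformers/models/xlm/__init__.py`): importing the package stays cheap, and the heavy
# torch/TF modules are only imported on first attribute access, e.g.
#
#     import transformers.models.xlm as xlm  # no torch import happens yet
#     model_cls = xlm.XLMModel               # _LazyModule resolves this attribute lazily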
| 280
| 1
|
"""simple docstring"""
def pancake_sort(arr):
    """Sort a list using the pancake-sort flipping strategy."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum element in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # First flip: bring that maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Second flip: move the maximum from the front to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
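
# Worked example of the two flips per pass (a sketch): pancake_sort([3, 1, 2])
#   cur=3: max 3 is at index 0 -> flip prefix: [3, 1, 2] -> flip first 3: [2, 1, 3]
#   cur=2: max of [2, 1] is at index 0 -> flip prefix: [2, 1, 3] -> flip first 2: [1, 2, 3]
assert pancake_sort([3, 1, 2]) == [1, 2, 3]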
| 61
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # timm stores query/key/value as one fused matrix; split it into the three projections
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
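
# Example invocation (a sketch; the script and output directory names are illustrative):
#
#     python convert_swin_timm_to_pytorch.py \
#         --swin_name swin_tiny_patch4_window7_224 \
#         --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224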
| 296
| 0
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        onehot = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        log_probs = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(onehot * log_probs, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
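
# Tiny numeric check of the one-hot cross-entropy above (a sketch, not part of the training flow):
def _example_cross_entropy():
    logits = jnp.array([[2.0, 0.0, 0.0]])  # one sample, three classes
    labels = jnp.array([0])
    onehot = (labels[..., None] == jnp.arange(3)[None]).astype("f4")
    return -jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1)  # ≈ [0.239]
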
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        # pad every sequence to max_length and build the matching attention mask
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales, keyed off the flattened path
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
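
# Quick check of the schedule shape built above (a sketch): linear warmup from `init_lr`
# to `lr` over `warmup_steps`, then linear decay toward 1e-7.
def _example_schedule():
    lr_fn = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
    return lr_fn(0), lr_fn(100), lr_fn(1000)  # ~0.0, 3e-5, ~1e-7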
| 40
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(ValueError):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 40
| 1
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
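
# Usage sketch (hypothetical; downloads the checkpoint from the Hub on first use):
def _example_translation_inputs():
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    # input_ids start with the __en__ language token and end with </s>
    return tokenizer("Hello world", return_tensors="pt")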
| 89
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
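
# This script is meant to be launched once per process via torch.distributed, e.g.
# (a sketch; the filename is illustrative and `--streaming True` relies on argparse's
# truthy string handling for `type=bool`):
#
#     torchrun --nproc_per_node=2 run_torch_distributed_test.py --streaming True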
| 366
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 320
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 23
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = '''maskformer-swin'''
__UpperCamelCase : Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
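# A minimal usage sketch (illustrative, not part of the original file; the
# defaults above describe a Swin-tiny-sized backbone):
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config.hidden_size   # 768 == 96 * 2 ** 3, the channel dim after the last stage
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']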
| 121
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        # overridden to skip: subword regularization is not applicable to this tokenizer
        pass

    def test_subword_regularization_tokenizer(self):
        # overridden to skip: subword regularization is not applicable to this tokenizer
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences)
| 357
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
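# A minimal usage sketch (illustrative; `content` is a hypothetical dataset column):
#
#   task = LanguageModeling(text_column="content")
#   task.column_mapping   # -> {'content': 'text'}, used to rename the dataset column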
| 269
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
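# A quick worked example of the escape-time recurrence z_{n+1} = z_n**2 + c used in
# get_distance (illustrative sketch, not part of the original script):
#
#   get_distance(0, 0, 50)   # c = 0 never diverges -> 1.0 (inside the set, drawn black)
#   get_distance(1, 1, 50)   # c = 1 + 1j escapes on the first step -> 0.0
#   get_color_coded_rgb(1)   # distance 1 maps to black: (0, 0, 0)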
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 31
|
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
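# A worked example of the mass-action law n * p = n_i**2 that the function relies
# on (illustrative): with n = 25 and p = 100, the intrinsic concentration must be
# sqrt(25 * 100) = 50, so:
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0)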
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
| 40
|
"""simple docstring"""
class MaxFenwickTree:
    """Fenwick (binary indexed) tree specialised for range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
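# A minimal usage sketch (illustrative; indices are 0-based and `query` treats
# `right` as exclusive):
#
#   tree = MaxFenwickTree(5)
#   tree.update(2, 7)
#   tree.update(4, 3)
#   tree.query(0, 5)   # -> 7, the maximum over indices 0..4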
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
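# A worked example (illustrative): 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse
# of 7 modulo 26 -- the classic affine-cipher key check.
if __name__ == "__main__":
    print(gcd(7, 26))               # 1, so the inverse exists
    print(find_mod_inverse(7, 26))  # 15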
| 95
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
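# A small worked example (illustrative): for scores [3, 5, 2, 9] and height
# log2(4) = 2, the maximizing root chooses between the two minimizing children,
# min(3, 5) = 3 and min(2, 9) = 2, so minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.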
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 16
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
| 320
| 0
|
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
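# A quick worked example (illustrative): 25 = 0b11001 and 32 = 0b100000 share no
# set bits, so every position ANDs to zero, padded to the wider operand:
#
#   binary_and(25, 32)   # -> '0b000000'
#   binary_and(5, 3)     # -> '0b001'  (101 & 011 = 001)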
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91
|
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowercase : List[str] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params )
UpperCAmelCase = flax_model.generate(lowerCAmelCase ).sequences
UpperCAmelCase = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
UpperCAmelCase = 0.8
UpperCAmelCase = 10
UpperCAmelCase = 0.3
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = 2
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 91
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 22
|
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
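# Why the logarithm trick works (illustrative): comparing a**x values directly
# would mean building astronomically large integers, but log10(a**x) = x * log10(a)
# preserves the ordering. For example, 2 * log10(100) = 4.0 > 3 * log10(10) = 3.0,
# matching 100**2 = 10000 > 10**3 = 1000.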
if __name__ == "__main__":
print(solution())
| 269
| 0
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    # member names reconstructed from the upstream diffusers source
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
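# A minimal usage sketch (illustrative; assumes a concrete subclass that also
# inherits ConfigMixin, e.g. diffusers' DDPMScheduler; "some/model-repo" is a
# placeholder repository id):
#
#   scheduler = DDPMScheduler.from_pretrained("some/model-repo", subfolder="scheduler")
#   scheduler.compatibles   # other scheduler classes that can be swapped in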
| 364
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shapes in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
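# A minimal usage sketch (illustrative):
#
#   x = tf.reshape(tf.range(6), (2, 3))
#   shape_list(x)                  # -> [2, 3] when the shape is fully static
#   shape_list(np.zeros((2, 3)))   # numpy inputs short-circuit to list(tensor.shape)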
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # adding a tiny epsilon to the logits sidesteps rare XLA numerical issues
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
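# Illustrative check (added for clarity; not part of the original module): after
# normalization with unit weight and zero bias, each row has mean ~0.
_ln_out = functional_layernorm(tf.random.normal((2, 4)), tf.ones((4,)), tf.zeros((4,)))
assert float(tf.reduce_max(tf.abs(tf.reduce_mean(_ln_out, axis=-1)))) < 1e-5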
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
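# Illustrative check (added for clarity; not part of the original module):
# mirrors torch.flatten(x, start_dim=1) on a (2, 3, 4) tensor.
_flatten_demo = flatten(tf.zeros((2, 3, 4)), start_dim=1)
assert shape_list(_flatten_demo) == [2, 12]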
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask (e.g., switches 0. and 1.) and turn it into an additive mask."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
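# Illustrative check (added for clarity; not part of the original module): a 2D
# mask becomes a broadcastable 4D additive mask (0 where attended, dtype-min
# where masked).
_mask_demo = invert_attention_mask(tf.constant([[1.0, 1.0, 0.0]]))
assert shape_list(_mask_demo) == [1, 1, 1, 3]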
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that every id in `tensor` is smaller than the embedding layer's input dimension."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
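# Illustrative check (added for clarity; not part of the original module): ids
# below the embedding size pass silently; an out-of-range id would raise.
check_embeddings_within_bounds(tf.constant([1, 5, 9]), embed_dim=10)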
def save_attributes_to_hdf5_group(group, name, data):
    # HDF5 object header limit, in bytes.
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
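# Illustrative round-trip sketch (added for clarity; not part of the original
# module). h5py is an assumed optional dependency here, so the demo is skipped
# when it is unavailable.
try:
    import os
    import tempfile

    import h5py

    _h5_path = os.path.join(tempfile.mkdtemp(), "demo_attrs.h5")
    with h5py.File(_h5_path, "w") as _h5_file:
        save_attributes_to_hdf5_group(_h5_file, "layer_names", [b"dense", b"conv"])
    with h5py.File(_h5_path, "r") as _h5_file:
        assert load_attributes_from_hdf5_group(_h5_file, "layer_names") == ["dense", "conv"]
except ImportError:
    pass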
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
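# Illustrative check (added for clarity; not part of the original module): 1D
# tensors in a nested structure gain a trailing axis; higher ranks are untouched.
_expand_demo = expand_1d({"labels": tf.zeros((8,)), "ids": tf.zeros((8, 4))})
assert shape_list(_expand_demo["labels"]) == [8, 1]
assert shape_list(_expand_demo["ids"]) == [8, 4]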
| 272
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
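# --- Illustrative usage sketch (added for clarity; not part of the original
# script). All argument names below are hypothetical placeholders supplied by
# the caller's training script.
#
#     trainer = Seq2SeqTrainer(
#         config=None,             # falls back to model.config
#         data_args=data_args,     # provides eval_beams / val_max_target_length
#         model=model,
#         args=training_args,      # with label_smoothing, sortish_sampler, predict_with_generate, ...
#         train_dataset=train_dataset,
#     )
#     trainer.train()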
| 31
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True,
        use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1,
        pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
        head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 153
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
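# Illustrative usage sketch (added for clarity; not part of the original
# module). The column names below are hypothetical.
#
#     task = Summarization(text_column="article", summary_column="highlights")
#     task.column_mapping   # {"article": "text", "highlights": "summary"}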
| 153
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096,
        activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True,
        scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
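# Illustrative usage sketch (added for clarity; not part of the original
# module). The hyperparameter values below are hypothetical.
#
#     config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
#     config.hidden_size          # 256, resolved through `attribute_map`
#     config.num_hidden_layers    # 6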
| 73
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 95
| 0
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # Perceiver can only accept one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 320
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 320
| 1
|
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        resultant_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, resultant_first_row)
        final_set = resultant
    return final_set
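# Worked example (added for clarity): simplify() first normalizes each row by
# its leading coefficient, then cancels the first column against the first row.
#     simplify([[2, 4, 6], [1, 2, 3]])
# normalizes to [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], then subtracts row 0 from
# row 1, giving [[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]].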
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 91
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 91
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def a( A : List[str] ) -> int:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
a = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
a = name.replace("patch_embed" , "" )
if "pos_embed" in name:
a = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
a = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
a = name.replace("proj" , "projection" )
if "blocks" in name:
a = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
a = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
a = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
a = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
a = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
a = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
a = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
a = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
a = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
a = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
a = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
a = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
a = name.replace("conv1" , "convolution1" )
if "conv2" in name:
a = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
a = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
a = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
a = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
a = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
a = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
a = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
a = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
a = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
a = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
a = name.replace("pretrained" , "dpt" )
if "bn" in name:
a = name.replace("bn" , "batch_norm" )
if "head" in name:
a = name.replace("head" , "head.head" )
if "encoder.norm" in name:
a = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
a = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
a = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
a = name.replace(".." , "." )
if "stem.conv" in name:
a = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
a = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
a = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
a = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
a = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
a = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
a = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
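# Editorial note (not in the original script): the refinenet rename above
# flips the block index with abs(layer_idx - 4), because the original
# checkpoint numbers its refinenet blocks 4..1 while fusion_stage.layers
# are indexed 0..3:
#   refinenet4 -> fusion_stage.layers.0
#   refinenet3 -> fusion_stage.layers.1
#   refinenet2 -> fusion_stage.layers.2
#   refinenet1 -> fusion_stage.layers.3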
def a( A : List[Any] , A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
a = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
a = in_proj_weight[: config.hidden_size, :]
a = in_proj_bias[: config.hidden_size]
a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a = in_proj_weight[
-config.hidden_size :, :
]
a = in_proj_bias[-config.hidden_size :]
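# Editorial sketch (hypothetical helper, not part of the original script):
# the slicing above splits one fused qkv projection of shape
# (3 * hidden_size, hidden_size) into the separate query/key/value weights
# the HF model expects. The same slices on a toy tensor:
def _demo_split_fused_qkv(hidden_size: int = 4) -> None:
    import torch

    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
    fused = fused.reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]                  # first hidden_size rows
    key = fused[hidden_size : hidden_size * 2, :]   # middle block of rows
    value = fused[-hidden_size:, :]                 # last block of rows
    # concatenating the three blocks reproduces the fused matrix exactly
    assert torch.equal(torch.cat([query, key, value]), fused)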
def a( ) -> Any:
"""simple docstring"""
a = "http://images.cocodataset.org/val2017/000000039769.jpg"
a = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def a( A : int , A : Optional[Any] , A : str , A : Union[str, Any] , A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
a , a = get_dpt_config(A )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
a = torch.load(A , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(A )
# rename keys
for key in state_dict.copy().keys():
a = state_dict.pop(A )
a = val
# read in qkv matrices
read_in_q_k_v(A , A )
# load HuggingFace model
a = DPTForSemanticSegmentation(A ) if "ade" in checkpoint_url else DPTForDepthEstimation(A )
model.load_state_dict(A )
model.eval()
# Check outputs on an image
a = 480 if "ade" in checkpoint_url else 384
a = DPTImageProcessor(size=A )
a = prepare_img()
a = image_processor(A , return_tensors="pt" )
# forward pass
a = model(**A ).logits if "ade" in checkpoint_url else model(**A ).predicted_depth
if show_prediction:
a = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=A , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(A ).mkdir(exist_ok=A )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
_lowercase: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_lowercase: Optional[int] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 359
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a( A : Optional[Any] ) -> Tuple:
"""simple docstring"""
a = 384
a = 7
if "tiny" in model_name:
a = 96
a = (2, 2, 6, 2)
a = (3, 6, 12, 24)
elif "small" in model_name:
a = 96
a = (2, 2, 18, 2)
a = (3, 6, 12, 24)
elif "base" in model_name:
a = 128
a = (2, 2, 18, 2)
a = (4, 8, 16, 32)
a = 12
a = 512
elif "large" in model_name:
a = 192
a = (2, 2, 18, 2)
a = (6, 12, 24, 48)
a = 12
a = 768
# set label information
a = 150
a = "huggingface/label-files"
a = "ade20k-id2label.json"
a = json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) )
a = {int(k ): v for k, v in idalabel.items()}
a = {v: k for k, v in idalabel.items()}
a = SwinConfig(
embed_dim=A , depths=A , num_heads=A , window_size=A , out_features=["stage1", "stage2", "stage3", "stage4"] , )
a = UperNetConfig(
backbone_config=A , auxiliary_in_channels=A , num_labels=A , id2label=A , label2id=A , )
return config
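# Editorial note: the if/elif chain above encodes the published Swin
# variants; an equivalent table-driven sketch (values copied from the
# branches above) makes the size -> hyperparameter mapping easier to audit.
# The "base" and "large" variants additionally use window_size 12 and wider
# auxiliary channels (512 and 768 respectively):
#   SWIN_VARIANTS = {
#       "tiny":  dict(embed_dim=96,  depths=(2, 2, 6, 2),  num_heads=(3, 6, 12, 24)),
#       "small": dict(embed_dim=96,  depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)),
#       "base":  dict(embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)),
#       "large": dict(embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)),
#   }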
def a( A : Optional[Any] ) -> Tuple:
"""simple docstring"""
a = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def a( A : List[str] , A : List[str] , A : Dict ) -> Any:
"""simple docstring"""
a = dct.pop(A )
a = val
def a( A : str , A : List[str] ) -> List[Any]:
"""simple docstring"""
a = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
a = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
a = in_proj_weight[:dim, :]
a = in_proj_bias[: dim]
a = in_proj_weight[
dim : dim * 2, :
]
a = in_proj_bias[
dim : dim * 2
]
a = in_proj_weight[
-dim :, :
]
a = in_proj_bias[-dim :]
# fmt: on
def a( A : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
a , a = x.shape
a = x.reshape(A , 4 , in_channel // 4 )
a = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(A , A )
return x
def a( A : int ) -> Dict:
"""simple docstring"""
a , a = x.shape
a = x.reshape(A , in_channel // 4 , 4 )
a = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(A , A )
return x
def a( A : List[Any] ) -> Dict:
"""simple docstring"""
a = x.shape[0]
a = x.reshape(4 , in_channel // 4 )
a = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(A )
return x
def a( A : Optional[Any] ) -> List[str]:
"""simple docstring"""
a = x.shape[0]
a = x.reshape(in_channel // 4 , 4 )
a = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(A )
return x
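# Editorial sanity sketch (illustrative; upstream these four helpers are
# conventionally named correct_unfold_reduction_order /
# reverse_correct_unfold_reduction_order and the norm equivalents, though
# they were all collapsed to `a` in this listing). Each correct_*
# permutation is undone by its reverse_* counterpart, so a round trip is
# the identity. Demonstrated here for the 2-D reduction pair:
def _demo_unfold_round_trip() -> None:
    import torch

    x = torch.randn(8, 12)  # (out_channel, in_channel); in_channel % 4 == 0
    out_channel, in_channel = x.shape
    # forward reorder: (out, 4, in//4) -> permute -> transpose -> flatten
    y = x.reshape(out_channel, 4, in_channel // 4)
    y = y[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    # reverse reorder undoes it; the permutation [0, 2, 1, 3] is its own inverse
    z = y.reshape(out_channel, in_channel // 4, 4)
    z = z[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    assert torch.equal(z, x)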
def a( A : Any , A : int , A : Dict ) -> Union[str, Any]:
"""simple docstring"""
a = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
a = model_name_to_url[model_name]
a = torch.hub.load_state_dict_from_url(A , map_location="cpu" , file_name=A )[
"state_dict"
]
for name, param in state_dict.items():
print(A , param.shape )
a = get_upernet_config(A )
a = UperNetForSemanticSegmentation(A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
a = state_dict.pop(A )
if "bn" in key:
a = key.replace("bn" , "batch_norm" )
a = val
# rename keys
a = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
a = reverse_correct_unfold_reduction_order(A )
if "norm" in key:
a = reverse_correct_unfold_norm_order(A )
model.load_state_dict(A )
# verify on image
a = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
a = Image.open(requests.get(A , stream=A ).raw ).convert("RGB" )
a = SegformerImageProcessor()
a = processor(A , return_tensors="pt" ).pixel_values
with torch.no_grad():
a = model(A )
a = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
a = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
a = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
a = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
a = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_lowercase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase: int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 71
| 0
|
def A (__A : str , __A : int ) -> list:
"""simple docstring"""
UpperCAmelCase_ = word.split()
def justify(__A : list , __A : int , __A : int ) -> str:
UpperCAmelCase_ = max_width - width
UpperCAmelCase_ = len(__A )
if len(__A ) == 1:
# if there is only one word on the line,
# pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
UpperCAmelCase_ = words_count - 1
# num_spaces_between_words_list[i] is the number of spaces
# to insert after the word at line[i]
UpperCAmelCase_ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
UpperCAmelCase_ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute the leftover spaces round-robin, starting from the leftmost gaps
for i in range(__A ):
num_spaces_between_words_list[i] += 1
UpperCAmelCase_ = []
for i in range(__A ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__A )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
for word in words:
if width + len(__A ) + len(__A ) <= max_width:
# keep adding words while the line still fits within max_width
# width = total length of the words already on the line (no spaces)
# len(word) = length of the current word
# len(line) = minimum number of single spaces needed between words
line.append(__A )
width += len(__A )
else:
# justify the line and add it to result
answer.append(justify(__A , __A , __A ) )
# reset new line and new width
UpperCAmelCase_ , UpperCAmelCase_ = [word], len(__A )
UpperCAmelCase_ = max_width - width - len(__A )
answer.append(''' '''.join(__A ) + (remaining_spaces + 1) * ''' ''' )
return answer
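# Editorial example (hypothetical call; the function was renamed to `A` in
# this listing): A("This is an example of text justification.", 16) returns
#   ['This    is    an', 'example  of text', 'justification.  ']
# Every line is exactly 16 characters wide; extra spaces go to the leftmost
# gaps first, and the final line is left-justified and padded on the right.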
if __name__ == "__main__":
from doctest import testmod
testmod()
| 51
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 272
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[Any] =get_tests_dir("fixtures/test_sentencepiece.model")
_lowercase : Optional[Any] ={'target_lang': 'fi', 'source_lang': 'en'}
_lowercase : List[Any] ='>>zh<<'
_lowercase : int ='Helsinki-NLP/'
if is_torch_available():
_lowercase : Tuple ='pt'
elif is_tf_available():
_lowercase : Any ='tf'
else:
_lowercase : Any ='jax'
@require_sentencepiece
class snake_case__ (A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :Optional[int] = MarianTokenizer
__lowerCAmelCase :Union[str, Any] = False
__lowerCAmelCase :List[str] = True
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
super().setUp()
a__ : Dict = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a__ : Tuple = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
a__ : Dict = Path(self.tmpdirname )
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
a__ : List[str] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[str]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Optional[Any] = """</s>"""
a__ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(_snake_case ) , 9 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : List[str] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
a__ : List[Any] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
a__ : str = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(_snake_case , batch.input_ids[0] )
a__ : int = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_snake_case )
a__ : Tuple = [x.name for x in Path(_snake_case ).glob("""*""" )]
self.assertIn("""source.spm""" , _snake_case )
MarianTokenizer.from_pretrained(_snake_case )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[Any] = self.get_tokenizer()
a__ : Tuple = tok(
["""I am a small frog""" * 1_0_0_0, """I am a small frog"""] , padding=_snake_case , truncation=_snake_case , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = self.get_tokenizer()
a__ : List[str] = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=_snake_case , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Any = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Tuple = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
a__ : int = """Tämä on testi"""
a__ : str = """This is a test"""
a__ : Optional[Any] = [7_6, 7, 2_0_4_7, 2]
a__ : Optional[int] = [6_9, 1_2, 1_1, 9_4_0, 2]
a__ : Any = tokenizer(_snake_case ).input_ids
self.assertListEqual(_snake_case , _snake_case )
a__ : Dict = tokenizer(text_target=_snake_case ).input_ids
self.assertListEqual(_snake_case , _snake_case )
a__ : Any = tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 361
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : Union[str, Any] =16
_lowercase : Dict =32
def lowerCAmelCase_ ( _lowercase : Accelerator , _lowercase : int = 16 , _lowercase : str = "bert-base-cased") -> Union[str, Any]:
"""simple docstring"""
a__ : Union[str, Any] = AutoTokenizer.from_pretrained(_lowercase)
a__ : Any = load_dataset("""glue""" , """mrpc""")
def tokenize_function(_lowercase : str):
# max_length=None => use the model max length (it's actually the default)
a__ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowercase , max_length=_lowercase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a__ : Any = datasets.map(
_lowercase , batched=_lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_lowercase)
# We also rename the 'label' column to 'labels', which is the column name
# the models of the transformers library expect
a__ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(_lowercase : Optional[Any]):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return tokenizer.pad(_lowercase , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
a__ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase)
a__ : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase)
return train_dataloader, eval_dataloader
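# Editorial usage sketch (the function above is named get_dataloaders
# upstream; its name was collapsed to lowerCAmelCase_ in this listing):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, 16, "bert-base-cased")
#
# The TPU branch in collate_fn pads every batch to a fixed length because
# XLA recompiles for each new tensor shape; on GPU/CPU, padding to the
# longest sequence in each batch is cheaper.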
def lowerCAmelCase_ ( _lowercase : Any , _lowercase : Dict) -> int:
"""simple docstring"""
# Initialize accelerator
a__ : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : Union[str, Any] = config["""lr"""]
a__ : List[str] = int(config["""num_epochs"""])
a__ : List[str] = int(config["""seed"""])
a__ : Tuple = int(config["""batch_size"""])
a__ : int = args.model_name_or_path
set_seed(_lowercase)
a__ , a__ : int = get_dataloaders(_lowercase , _lowercase , _lowercase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase)
# Instantiate optimizer
a__ : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowercase)
if accelerator.state.deepspeed_plugin is not None:
a__ : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
a__ : List[str] = 1
a__ : List[Any] = (len(_lowercase) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a__ : Dict = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
a__ : Dict = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : List[Any] = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
# We need to keep track of how many total steps we have iterated over
a__ : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
a__ : Optional[int] = 0
# Now we train the model
a__ : Tuple = evaluate.load("""glue""" , """mrpc""")
a__ : List[Any] = 0
a__ : Tuple = {}
for epoch in range(_lowercase , _lowercase):
model.train()
for step, batch in enumerate(_lowercase):
a__ : Union[str, Any] = model(**_lowercase)
a__ : Tuple = outputs.loss
a__ : Any = loss / gradient_accumulation_steps
accelerator.backward(_lowercase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
a__ : int = 0
for step, batch in enumerate(_lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
a__ : str = model(**_lowercase)
a__ : Union[str, Any] = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once rather than multiple times
# If we are in a multiprocess environment, the last batch has duplicates
a__ , a__ : Optional[int] = accelerator.gather(
(predictions, batch["""labels"""]))
if accelerator.use_distributed:
if step == len(_lowercase) - 1:
a__ : Union[str, Any] = predictions[: len(eval_dataloader.dataset) - samples_seen]
a__ : Any = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
a__ : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowercase)
a__ : Any = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
a__ : List[str] = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""") , """w""") as f:
json.dump(_lowercase , _lowercase)
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
a__ : Optional[Any] = argparse.ArgumentParser(description="""Simple example of a training script that tracks the best evaluation performance.""")
parser.add_argument(
"""--model_name_or_path""" , type=_lowercase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_lowercase , )
parser.add_argument(
"""--output_dir""" , type=_lowercase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=_lowercase , default=_lowercase , help="""Optional lower bound for the performance metric. If set, training raises an error when the metric falls below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=_lowercase , default=3 , help="""Number of train epochs.""" , )
a__ : Any = parser.parse_args()
a__ : Dict = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(_lowercase , _lowercase)
if __name__ == "__main__":
main()
| 266
| 0
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , __a = None , **__a , ) -> str:
super().__init__(
__a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , )
UpperCamelCase = field
UpperCamelCase = path_or_paths if isinstance(__a , __a ) else {self.split: path_or_paths}
UpperCamelCase = Json(
cache_dir=__a , data_files=__a , features=__a , field=__a , **__a , )
def snake_case_ (self ) -> Any:
# Build iterable dataset
if self.streaming:
UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , )
UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
class _lowerCamelCase :
def __init__(self , __a , __a , __a = None , __a = None , **__a , ) -> Dict:
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
UpperCamelCase = dataset
UpperCamelCase = path_or_buf
UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCamelCase = num_proc
UpperCamelCase = "utf-8"
UpperCamelCase = to_json_kwargs
def snake_case_ (self ) -> int:
UpperCamelCase = self.to_json_kwargs.pop("path_or_buf" , __a )
UpperCamelCase = self.to_json_kwargs.pop("orient" , "records" )
UpperCamelCase = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
UpperCamelCase = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
UpperCamelCase = self.to_json_kwargs.pop("compression" , __a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"`datasets` currently does not support {compression} compression" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=__a ) as buffer:
UpperCamelCase = self._write(file_obj=__a , orient=__a , lines=__a , index=__a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead." )
UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=__a , lines=__a , index=__a , **self.to_json_kwargs )
return written
def snake_case_ (self , __a ) -> str:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = args
UpperCamelCase = query_table(
table=self.dataset.data , key=slice(__a , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=__a , orient=__a , lines=__a , index=__a , **__a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
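# Editorial sketch of the encoding above: each Arrow slice is round-tripped
# through pandas and serialized as JSON Lines. For example:
#
#   import pandas as pd
#   batch = pd.DataFrame({"text": ["a", "b"]})
#   batch.to_json(orient="records", lines=True)
#   # -> '{"text":"a"}\n{"text":"b"}'
#
# Depending on the pandas version there may be no trailing newline, which
# is why the method above appends one before encoding to bytes.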
def snake_case_ (self , __a , __a , __a , __a , **__a , ) -> int:
UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__a )
else:
UpperCamelCase , UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __a , __a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(__a )
return written
| 153
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Tuple:
UpperCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
UpperCamelCase = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) )
self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) )
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
UpperCamelCase = get_activation("gelu" )
UpperCamelCase = get_activation("gelu_10" )
UpperCamelCase = torch_builtin(__a )
UpperCamelCase = geluaa(__a )
UpperCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__a ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
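# Editorial note: "gelu_10" is a clipped GELU whose outputs are capped at
# 10.0, so for the tensor above only the input 100 is affected; the masked
# comparison checks that all values below the cap match the plain GELU.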
def snake_case_ (self ) -> Any:
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__a ):
get_activation("bogus" )
with self.assertRaises(__a ):
get_activation(__a )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = get_activation("gelu" )
UpperCamelCase = 1
UpperCamelCase = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__a ):
UpperCamelCase = acta.a
| 153
| 1
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=sys.maxsize ) -> List[str]:
UpperCAmelCase_ : List[str] = 'bilinear'
UpperCAmelCase_ : List[str] = max_size
UpperCAmelCase_ : List[Any] = short_edge_length
def __call__( self , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = []
for img in imgs:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase_ : Optional[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase_ : int = size * 1.0 / min(_UpperCamelCase , _UpperCamelCase )
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ : int = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = scale * h, size
if max(_UpperCamelCase , _UpperCamelCase ) > self.max_size:
UpperCAmelCase_ : Tuple = self.max_size * 1.0 / max(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = newh * scale
UpperCAmelCase_ : Union[str, Any] = neww * scale
UpperCAmelCase_ : Tuple = int(neww + 0.5 )
UpperCAmelCase_ : List[str] = int(newh + 0.5 )
if img.dtype == np.uint8:
UpperCAmelCase_ : int = Image.fromarray(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase_ : Any = np.asarray(_UpperCamelCase )
else:
UpperCAmelCase_ : Optional[Any] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
UpperCAmelCase_ : int = nn.functional.interpolate(
_UpperCamelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCamelCase ).squeeze(0 )
img_augs.append(_UpperCamelCase )
return img_augs
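# Editorial example of the scaling above (illustrative numbers): for an
# image with (h, w) = (480, 640) and a sampled short-edge target of 400,
# scale = 400 / 480, so newh = 400 and neww rounds to 533; if the longer
# edge exceeded max_size, both edges would be rescaled again so that the
# longest edge equals max_size.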
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase_ : Union[str, Any] = cfg.INPUT.FORMAT
UpperCAmelCase_ : str = cfg.SIZE_DIVISIBILITY
UpperCAmelCase_ : Optional[Any] = cfg.PAD_VALUE
UpperCAmelCase_ : Tuple = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase_ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase_ : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase_ : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase_ : str = lambda _UpperCamelCase : (x - self.pixel_mean) / self.pixel_std
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
UpperCAmelCase_ : List[Any] = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase_ : Union[str, Any] = [im.shape[-2:] for im in images]
UpperCAmelCase_ : Union[str, Any] = [
nn.functional.pad(
_UpperCamelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCamelCase , _UpperCamelCase )
]
return torch.stack(_UpperCamelCase ), torch.tensor(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[Any]:
with torch.no_grad():
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : int = [images]
if single_image:
assert len(_UpperCamelCase ) == 1
for i in range(len(_UpperCamelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(i , images.pop(i ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase_ : Optional[int] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase_ : List[str] = self.aug(_UpperCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase_ : Tuple = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
UpperCAmelCase_ , UpperCAmelCase_ : str = self.pad(_UpperCamelCase )
# size-divisibility padding is not implemented here
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (y, x) scale factors for mapping boxes back to the original image
UpperCAmelCase_ : Dict = torch.true_divide(_UpperCamelCase , _UpperCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowercase__ ( __snake_case : Any , __snake_case : Tuple[int, int] ):
'''simple docstring'''
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case )
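# Editorial illustration (toy tensors; the two module-level functions above
# were renamed during extraction): the first rescales xyxy boxes from
# resized-image coordinates back to the original image, the second clamps
# each coordinate into the image bounds in place.
def _demo_scale_and_clip_boxes() -> None:
    import torch

    boxes = torch.tensor([[-4.0, 6.0, 130.0, 90.0]])  # x1, y1, x2, y2
    scale_yx = torch.tensor([[2.0, 0.5]])             # (scale_y, scale_x)
    boxes[:, 0::2] *= scale_yx[:, 1]                  # rescale x1, x2
    boxes[:, 1::2] *= scale_yx[:, 0]                  # rescale y1, y2
    height, width = 200, 100                          # original image size
    boxes[:, 0].clamp_(min=0, max=width)
    boxes[:, 1].clamp_(min=0, max=height)
    boxes[:, 2].clamp_(min=0, max=width)
    boxes[:, 3].clamp_(min=0, max=height)
    assert torch.equal(boxes, torch.tensor([[0.0, 12.0, 65.0, 180.0]]))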
| 145
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=250_112 , d_model=512 , d_kv=64 , d_ff=1_024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    @property
    def num_attention_heads( self ) -> int:
        return self.num_heads
    @property
    def num_hidden_layers( self ) -> int:
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast ):
    '''simple docstring'''
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ) -> int:
        return 13
    @property
    def atol_for_validation( self ) -> float:
        return 5E-4
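# A short usage sketch (added for illustration, not part of the original file):
# instantiating the config above and inspecting the activation fields derived
# from `feed_forward_proj` in __init__.
if __name__ == "__main__":
    config = UMT5Config(feed_forward_proj="gated-gelu" )
    print(config.dense_act_fn )  # "gelu_new", special-cased at the end of __init__
    print(config.is_gated_act )  # True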
| 145
| 1
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class PerceiverTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def perceiver_tokenizer( self ):
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
    def get_tokenizer( self , **kwargs ) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_multibytes_char( self ):
        tokenizer = self.perceiver_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text )
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''[CLS]Unicode €.[SEP]''' )
        encoded = tokenizer('''e è é ê ë''' )
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
    def test_prepare_batch( self ):
        tokenizer = self.perceiver_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )
    def test_empty_target_text( self ):
        tokenizer = self.perceiver_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , batch )
        self.assertIn('''attention_mask''' , batch )
        self.assertNotIn('''decoder_input_ids''' , batch )
        self.assertNotIn('''decoder_attention_mask''' , batch )
    def test_max_length( self ):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='''max_length''' , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    def test_save_and_load_tokenizer( self ):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [F'<extra_id_{i}>' for i in range(125 )]
                special_tokens_map['''additional_special_tokens'''] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                tokenizer_config['''additional_special_tokens'''] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(tmp_dir , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    def test_decode_invalid_byte_id( self ):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '''�''' )
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists( self ):
        pass
    # tokenizer does not have a vocabulary
    def test_get_vocab( self ):
        pass
    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs( self ):
        pass
    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible( self ):
        pass
    def test_convert_tokens_to_string_format( self ):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                tokens = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
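# Note on the expected ids used in the tests above (added for illustration, not
# part of the original file): the Perceiver tokenizer works on raw utf-8 bytes
# shifted by the number of special tokens (6), so "U" (byte 85) becomes 91, and
# the euro sign expands to its three utf-8 bytes 226/130/172 -> 232/136/178,
# with 4 and 5 being the [CLS] and [SEP] ids.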
| 320
|
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus ):
    '''simple docstring'''
@require_torch
    def test_offline_mode( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='''fill-mask''' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet( self ):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='''fill-mask''' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
        '''
        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        '''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        '''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception( self ):
        load = '''
from transformers import pipeline
        '''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        '''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        '''
        env = self.get_env()
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
    def test_offline_model_dynamic_model( self ):
        load = '''
from transformers import AutoModel
        '''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        '''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
| 320
| 1
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule(temperature: float , molar_mass: float ) -> float:
    '''simple docstring'''
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # kg/mol for N2; the function's checks expect SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
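    # Sanity check (added sketch, not in the original script): kinetic theory gives
    # v_rms = sqrt(3 * R * T / M), roughly 517 m/s for nitrogen at 300 K.
    assert 500 < vrms < 530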
| 356
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a ( unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible( self ):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = '''fp16'''
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
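# The rule these tests pin down (added note, not part of the original file): a
# repo is safetensors-compatible when every PyTorch weight file has a
# .safetensors counterpart for the requested variant; one missing counterpart
# (the "Removed:" lines above) flips the check to False.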
| 72
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
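# Note (added, not part of the original script): timm-style SimMIM checkpoints
# store the attention projections as one fused `qkv` matrix of shape
# (3 * dim, dim); the slices val[:dim, :], val[dim : dim * 2, :] and
# val[-dim:, :] above peel off the query, key and value parameters for the
# HF layout.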
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase__ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 96
|
import re
def is_sri_lankan_phone_number(phone: str ) -> bool:
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
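    # A few extra illustrative checks (added, not in the original file):
    print(is_sri_lankan_phone_number('''+94773283048''' ))  # True
    print(is_sri_lankan_phone_number('''0718382399''' ))    # True
    print(is_sri_lankan_phone_number('''0912343221''' ))    # False: digit after the prefix must be 7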
| 71
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        """simple docstring"""
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 354
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
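# How this lazy pattern behaves at runtime (added note, not part of the original
# file): importing the package stays cheap, and heavy torch-backed symbols such
# as GPTBigCodeModel are only resolved on first attribute access through the
# _LazyModule proxy, while the TYPE_CHECKING branch keeps static analyzers
# fully informed.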
| 26
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution ):
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection(nn.Module ):
    def __init__( self , in_features , args_dim , domain_map , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x ) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer(nn.Module ):
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim = 1 ) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None , ) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        return 0.0
    def get_parameter_projection( self , in_features ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x ) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput(DistributionOutput ):
    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT
    @classmethod
    def domain_map( cls , df , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput(DistributionOutput ):
    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal
    @classmethod
    def domain_map( cls , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput(DistributionOutput ):
    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count , logits ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ) -> Distribution:
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None ) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
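# A minimal end-to-end sketch (added for illustration, not part of the original
# file; the shapes are made up): project network features to the parameters of
# a Normal distribution and draw a sample.
if __name__ == "__main__":
    output = NormalOutput(dim=1 )
    projection = output.get_parameter_projection(in_features=16 )
    features = torch.randn(4 , 16 )           # (batch, features)
    loc , scale = projection(features )       # domain-mapped parameters, each (4,)
    distribution = output.distribution((loc, scale) )
    print(distribution.sample().shape )       # torch.Size([4])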
| 26
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty( self, results ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self ):
        '''simple docstring'''
        MODEL_ID = '''sgugger/tiny-distilbert-classification'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' )
    def test_inference_fp16( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' )
    def test_train_no_configs_fp16( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tinier_bart'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tinier_bart'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(tmp_dir, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(tmp_dir, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(tmp_dir, '''train_time.csv''' ), env_info_csv_file=os.path.join(tmp_dir, '''env.csv''' ), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir, '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir, '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir, '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir, '''env.csv''' ) ).exists() )
    def test_trace_memory_line_by_line( self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary, '''sequential''' ) )
            self.assertTrue(hasattr(summary, '''cumulative''' ) )
            self.assertTrue(hasattr(summary, '''current''' ) )
            self.assertTrue(hasattr(summary, '''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, '''log.txt''' ), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir, '''log.txt''' ) ).exists() )
| 266
| 0
|
def rank_of_matrix(matrix ) -> int:
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row] , matrix[i] = matrix[i] , matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
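    # Usage sketch (added for illustration, not in the original file): the second
    # row below is a multiple of the first, so the matrix has rank 1.
    print(rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]]))  # 1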
| 118
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , bos_token_id=0 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ):
        '''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ):
        '''simple docstring'''
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self ):
        '''simple docstring'''
        pass
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_to_base( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ):
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 118
| 1
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__a = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( a_: str, a_: str ):
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(a_ )
_UpperCAmelCase , _UpperCAmelCase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
a_, output_loading_info=a_ )
else:
_UpperCAmelCase : Union[str, Any] = ProphetNetForConditionalGenerationOld.from_pretrained(a_ )
_UpperCAmelCase , _UpperCAmelCase : str = ProphetNetForConditionalGeneration.from_pretrained(
a_, output_loading_info=a_ )
_UpperCAmelCase : Tuple = ["key_proj", "value_proj", "query_proj"]
_UpperCAmelCase : Tuple = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split("." )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""" )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""" )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , "in_proj_weight" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""" )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path )
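# Illustrative invocation sketch (the script filename and both paths are placeholders,
# not confirmed by this file):
# python convert_prophetnet_checkpoint.py \
#     --prophetnet_checkpoint_path ./prophetnet_old \
#     --pytorch_dump_folder_path ./prophetnet_converted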
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 145
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self) -> None:
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self) -> dict:
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables(self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
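# Sketch of how this builder is typically exercised (file name illustrative,
# assumes a local CSV exists):
# import datasets
# ds = datasets.load_dataset("csv", data_files={"train": "train.csv"})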
| 145
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_herbert_fast"""] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ) -> bool:
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fp32_wrapper = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
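# Illustrative round-trip: unwrapping a parallel wrapper recovers the inner module.
# net = torch.nn.Linear(2, 2)
# assert extract_model_from_parallel(torch.nn.DataParallel(net)) is net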
def wait_for_everyone() -> None:
    PartialState().wait_for_everyone()
def save(obj , f ) -> None:
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ) -> str:
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
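# Illustrative: merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
# returns {"a": {"c": 2, "b": 1}, "d": 3}; nested keys are merged into
# `destination` in place rather than overwritten.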
def is_port_in_use(port = None ) -> bool:
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
| 225
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        # lstrip=True so that `<mask>` absorbs the space before it (RoBERTa convention).
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
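# Minimal usage sketch (downloads from the Hub on first call; ids shown are the
# expected values for roberta-base but are illustrative here):
# tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
# tokenizer("Hello world")["input_ids"]  # -> [0, 31414, 232, 2]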
| 93
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        # lstrip=True so that `<mask>` absorbs the space before it.
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 72
| 0
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler( ode_func: Calla
ble , ya: float , xa: float , step_size: float , x_end: float ):
    """Forward (explicit) Euler method for y' = ode_func(x, y) with y(xa) = ya."""
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
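# Illustrative check: for dy/dx = y with y(0) = 1,
# explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
# approaches e ≈ 2.718 as step_size shrinks (global error is O(step_size)).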
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool ):
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ):
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="""pt""" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="""pt""" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
        sequence = re.sub(r"""<.*?>""" , """""" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
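# Hypothetical usage sketch (image path and question are illustrative; requires
# Pillow and a model download on first call):
# tool = DocumentQuestionAnsweringTool()
# answer = tool(Image.open("invoice.png"), "What is the total amount?")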
| 76
| 0
|
from __future__ import annotations
import bisect
def bisect_left( sorted_collection , item , lo = 0 , hi = -1 ):
    """Leftmost insertion point for `item` that keeps `sorted_collection` sorted."""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection , item , lo = 0 , hi = -1 ):
    """Rightmost insertion point for `item` that keeps `sorted_collection` sorted."""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection , item ):
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection , item , left , right ):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
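# Illustrative: on [0, 5, 7, 10, 15], binary_search(..., 6) returns None (absent),
# while bisect_left(..., 6) returns 2 — the index where 6 would be inserted.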
if __name__ == "__main__":
_A : Union[str, Any] = input('Enter numbers separated by comma:\n').strip()
_A : List[Any] = sorted(int(item) for item in user_input.split(','))
_A : Tuple = int(input('Enter a single number to be found in the list:\n'))
_A : List[str] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 142
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig ):
    model_type = "xmod"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 26
| 0
|
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCAmelCase = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
UpperCAmelCase = """FOBHMDKEXQNRAULPGSJVTYICZW"""
UpperCAmelCase = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
UpperCAmelCase = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
UpperCAmelCase = """SGLCPQWZHKXAREONTFBVIYJUDM"""
UpperCAmelCase = """HVSICLTYKQUBXDWAJZOMFGPREN"""
UpperCAmelCase = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
UpperCAmelCase = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
UpperCAmelCase = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT , rotsel: RotorSelectionT , pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = f"""Please use 3 unique rotors (not {unique_rotsel})"""
        raise Exception(msg )
    # Checks if rotor positions are valid
    rotorpos1 , rotorpos2 , rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        msg = f"""First rotor position is not within range of 1..26 ({rotorpos1})"""
        raise ValueError(msg )
    if not 0 < rotorpos2 <= len(abc ):
        msg = f"""Second rotor position is not within range of 1..26 ({rotorpos2})"""
        raise ValueError(msg )
    if not 0 < rotorpos3 <= len(abc ):
        msg = f"""Third rotor position is not within range of 1..26 ({rotorpos3})"""
        raise ValueError(msg )
    # Validates string and returns dict
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def _plugboard( pbstring: str ) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring , str ):
        msg = f"""Plugboard setting isn't type string ({type(pbstring )})"""
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = f"""Odd number of symbols ({len(pbstring )})"""
        raise Exception(msg )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(""" """ , """""" )  # drop spaces before pairing
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"""'{i}' not in list of symbols"""
            raise Exception(msg )
        elif i in tmppbl:
            msg = f"""Duplicate symbol ({i})"""
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
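# Illustrative: _plugboard("AB") -> {"A": "B", "B": "A"}; adjacent letters are
# paired symmetrically, and the swap is applied before and after the rotor pass.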
def enigma(
    text: str ,
    rotor_position: RotorPositionT ,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3) ,
    plugb: str = "" ,
) -> str:
    text = text.upper()
    rotor_position , rotor_selection , plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )
    rotorpos1 , rotorpos2 , rotorpos3 = rotor_position
    rotor_a , rotor_b , rotor_c = rotor_selection
    # positions are entered as 1..26 but used as 0-based offsets
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor_a[index % len(abc )]
            # rotor rb --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor_b[index % len(abc )]
            # rotor rc --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor_c[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors (reverse pass)
            symbol = abc[rotor_c.index(symbol ) - rotorpos3]
            symbol = abc[rotor_b.index(symbol ) - rotorpos2]
            symbol = abc[rotor_a.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions (carry to the next rotor on wrap)
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
UpperCAmelCase = """This is my Python script that emulates the Enigma machine from WWII."""
UpperCAmelCase = (1, 1, 1)
UpperCAmelCase = """pictures"""
UpperCAmelCase = (rotora, rotora, rotora)
UpperCAmelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 54
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 54
| 1
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    data_dir = Path("data_bin" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
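# Illustrative invocation sketch (the script filename and both paths are
# placeholders for a real fairseq X-MOD dump):
# python convert_xmod_checkpoint.py --xmod_checkpoint_path ./xmod.pt \
#     --pytorch_dump_folder_path ./xmod_converted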
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 118
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput ):
    """simple docstring"""
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 118
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n        depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n        Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 151
|
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the two input values: 1 only when both inputs are 1."""
    # A tuple of the inputs contains no 0 exactly when both inputs are 1.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 151
| 1
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase ( unittest.TestCase ):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    """Round-trip the progress-bar toggle exposed through huggingface_hub."""
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
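

# Quick reference (added note; behavior per the transformers documentation):
# the verbosity exercised above can also be driven entirely from the
# environment, with no code changes, e.g.
#
#     TRANSFORMERS_VERBOSITY=error python my_script.py
#
# Accepted values are debug, info, warning, error and critical.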
| 70
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force: for each element scan everything to its right, O(n^2)."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version but with enumerate/slicing; still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution, O(n): scan from the right, keeping candidates on a stack."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
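

# Why the stack version is O(n) (added note): anything on the stack that is
# <= arr[index] can never be the next-greater element for anything to the
# left of index, so it is popped for good. Each element is pushed once and
# popped at most once, giving amortized O(1) work per element.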
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 225
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 73
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
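

# How the lazy import works (added note): at import time only the
# `_import_structure` mapping is registered; `_LazyModule` defers the heavy
# submodule imports until an attribute such as `MegaModel` is first accessed,
# keeping the top-level package import fast even when torch is installed.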
| 73
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 143
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values: 1 when at least one input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 76
| 0
|
'''simple docstring'''
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); with deriv=True, `value` is assumed to already be
    a sigmoid output and the derivative sigma * (1 - sigma) is returned."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the neuron's output approaches expected / 100."""
    # Random weight in (-1, 1)
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
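
# Note on the update rule (added for clarity): with output o = sigmoid(w * x),
# x = INITIAL_VALUE and squared error E = (target - o)^2 / 2, gradient descent
# gives dE/dw = -(target - o) * o * (1 - o) * x, which is exactly the
# `weight += INITIAL_VALUE * layer_1_delta` step performed above.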
| 106
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
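
# Example invocation (illustrative; the script filename and all paths are
# placeholders, the flag names come from the argparse definitions above):
#
#     python convert_wav2vec2_speech2text2_checkpoint.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --dict_path /path/to/dict.txt \
#         --pytorch_dump_folder_path ./converted-model \
#         --vocab_size 10224 \
#         --num_decoder_layers 7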
| 106
| 1
|
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power.

    For example, 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    """
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 54
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract base class every transformers CLI command implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
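

# Minimal sketch (illustrative, not from the original file) of a concrete
# command built on the ABC above; the command name and behavior are invented:
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello = parser.add_parser("hello")
#             hello.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello from the CLI")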
| 54
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
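

# The series being evaluated (for reference):
#   sin(theta) = sum_{r >= 0} (-1)^r * theta^(2r+1) / (2r+1)!
#   cos(theta) = sum_{r >= 0} (-1)^r * theta^(2r)   / (2r)!
# Truncating after `accuracy` terms, with theta first reduced mod 2*pi,
# gives the approximations above.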
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 28
| 0
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
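

# To run just these writer tests (illustrative command; the test-file path in
# your checkout may differ):
#
#     pytest tests/io/test_json.py -k "TestJsonDatasetWriter"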
| 151
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151
| 1
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
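

# These constants are consumed throughout the library; for example, a download
# helper would typically resolve a cached weights file roughly as (illustrative
# sketch, not an actual diffusers function):
#
#     weights_path = os.path.join(DIFFUSERS_CACHE, model_id, WEIGHTS_NAME)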
| 353
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
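

# Illustrative usage (sketch; assumes the standard PretrainedConfig API):
#
#     config = MgpstrConfig(max_token_length=27, hidden_size=768)
#     config.save_pretrained("./mgp-str-config")   # writes config.json
#     config = MgpstrConfig.from_pretrained("./mgp-str-config")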
| 73
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 73
| 1
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__lowerCamelCase : List[Any] = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1,
                 use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None,
                 decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0,
                 dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0,
                 output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
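
# A minimal usage sketch for the class above (it is exported as `MaskFormerConfig`
# in `transformers`; the defaults below come from __init__'s fallbacks):
if __name__ == "__main__":
    config = MaskFormerConfig()  # Swin-base backbone + DETR decoder by default
    print(config.backbone_config.model_type, config.decoder_config.model_type)  # swin detr
    roundtrip = MaskFormerConfig.from_dict(config.to_dict())  # nested configs survive serialization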
| 367
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 286
| 0
|
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
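
# A small sketch of how a placeholder map like `black_avoid_patterns` can be
# applied when rendering doc snippets (`fill_placeholders` is an illustrative
# helper, not part of the original file):
def fill_placeholders(text: str, mapping: dict) -> str:
    for placeholder, value in mapping.items():
        text = text.replace(placeholder, value)
    return text


# fill_placeholders("{model_class}.from_pretrained(...)", black_avoid_patterns)
# -> "FakeModelClass.from_pretrained(...)"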
| 106
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
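
# The block above keeps `import transformers.models.clip` cheap: heavy submodules
# are only imported when one of their names is first accessed. A minimal sketch
# of the idea (illustrative; the real `_LazyModule` also handles TYPE_CHECKING
# and error reporting):
import importlib
import types


class LazyAttrModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value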
| 106
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    return "".join(lines[start_index:line_index])
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code = "".join(lines[start_index:line_index])

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
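
# Example of the convention this script enforces (object path and class name
# are illustrative):
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#     class MyBlock(nn.Module):
#         ...
#
# `is_copy_consistent` re-derives the body from the referenced object, applies
# the `X->Y` renames, and reports (or, with --fix_and_overwrite, rewrites) any drift.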
| 62
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in `base` (2..36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
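
# Spot checks (values verifiable by hand):
#     decimal_to_any(255, 16) == "FF"
#     decimal_to_any(255, 2) == "11111111"
#     decimal_to_any(0, 5) == "0"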
| 62
| 1
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
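
# A minimal end-to-end sketch complementing the integration test above (assumes
# `transformers` with the Flax extras installed; model id as used in the tests):
#     from transformers import AutoTokenizer, FlaxDistilBertModel
#     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
#     model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
#     inputs = tokenizer("Hello, world!", return_tensors="np")
#     last_hidden_state = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)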
| 73
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand

SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
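
# Quick sketch of the PokerHand API exercised above (hands taken from SORTED_HANDS):
#     hand = PokerHand("JH AH TH KH QH")    # royal flush
#     other = PokerHand("KS 8D 4D 9S 4S")   # pair
#     hand.compare_with(other)              # -> "Win"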
| 28
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
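
# Condensed sketch of the three entry points exercised above (step counts and
# prompt are illustrative):
#     pipe = VersatileDiffusionPipeline.from_pretrained(
#         "shi-labs/versatile-diffusion", torch_dtype=torch.float16).to("cuda")
#     txt = pipe.text_to_image(prompt="a red car", num_inference_steps=25).images[0]
#     var = pipe.image_variation(image=txt, num_inference_steps=25).images[0]
#     mix = pipe.dual_guided(prompt="a red car", image=txt,
#                            text_to_image_strength=0.75, num_inference_steps=25).images[0]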
| 127
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/dummy-config.json""")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 127
| 1
|