import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
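# Usage note (a sketch, assuming a `transformers` source checkout with the test extras
# installed; the path below mirrors the usual repo layout for this module):
#   python -m pytest tests/models/upernet/test_modeling_upernet.py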
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of tuples (story, 1st continuation, 2nd continuation, label)
    into Transformer inputs of shape (n_batch, 2, input_len), where each alternative is
    [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token].
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare input tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
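# Example invocation (a sketch; the dataset paths are placeholders for the two
# RocStories cloze-test CSVs, not files shipped with this script):
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset path/to/cloze_test_val.csv \
#     --eval_dataset path/to/cloze_test_test.csv \
#     --output_dir ./log \
#     --train_batch_size 16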
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a checksum letter chosen by number % 23."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
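    # Worked example: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so "12345678Z" validates while any other trailing letter does not.
    print(is_spain_national_id("12345678Z"))  # True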
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
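# Example (illustrative): enforce the pinned range for a single runtime dependency;
# `require_version` raises if the installed version falls outside the range in
# dependency_versions_table.py.
#   dep_version_check("tqdm")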
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count Lychrel candidates below ``limit``: numbers that never produce a
    palindrome within 50 reverse-and-add iterations (Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_number = num
        while iterations < 50:
            a_number = sum_reverse(a_number)
            iterations += 1
            if is_palindrome(a_number):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
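    # Sanity check: for the default limit of 10_000 and the 50-iteration cutoff,
    # the known Project Euler 55 answer is 249, so this should print solution() = 249.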
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
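# Usage sketch (assumes the standard diffusers pipeline API, not shown in this file):
# the scheduler under test can be swapped into an existing pipeline while reusing its
# configured timestep parameters:
#   pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)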
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
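# Effect of the lazy module (illustrative): `from transformers.onnx import OnnxConfig`
# only imports the `.config` submodule at attribute-access time, so importing the
# package stays cheap when the ONNX helpers are unused.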
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Get raw characters from stdin."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get the second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a character from the keyboard and return the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
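# Example behavior (POSIX sketch): pressing the up arrow emits the escape sequence
# ESC [ A; get_character() folds it into a single code, chr(65 + ARROW_KEY_FLAG),
# i.e. chr(KEYMAP["up"]), which menu code can compare against KEYMAP entries.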
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
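# Minimal usage sketch for the checkpoint exercised above (assumes Hub access and the
# sentencepiece dependency of the slow tokenizer):
#   from transformers import AlbertTokenizer, AlbertModel
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   model = AlbertModel.from_pretrained("albert-base-v2")
#   outputs = model(**tokenizer("Hello world", return_tensors="pt"))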
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
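    # Round-trip example: b"Hello!" is 48 65 6C 6C 6F 21 in uppercase hex.
    assert base16_encode(b"Hello!") == "48656C6C6F21"
    assert base16_decode("48656C6C6F21") == b"Hello!"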
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho, or None on failure."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky, or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
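# Worked example: 8051 = 83 * 97, so pollard_rho(8051) returns one nontrivial
# factor (83 or 97, depending on the seed/step schedule above).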
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
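    # Expected output for n=4, k=2 (every k-combination of 1..n, one per line):
    # 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4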
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
lowerCAmelCase__ : Dict = len(SCREAMING_SNAKE_CASE_ )
# We need to create solution object to save path.
lowerCAmelCase__ : Union[str, Any] = [[0 for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
lowerCAmelCase__ : List[str] = run_maze(SCREAMING_SNAKE_CASE_ , 0 , 0 , SCREAMING_SNAKE_CASE_ )
if solved:
print('\n'.join(str(SCREAMING_SNAKE_CASE_ ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
# Final check point.
if i == j == (size - 1):
lowerCAmelCase__ : Union[str, Any] = 1
return True
lowerCAmelCase__ : int = (not i < 0) and (not j < 0) # Check lower bounds
lowerCAmelCase__ : str = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already-visited and blocked points.
lowerCAmelCase__ : int = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# mark the current cell as visited
lowerCAmelCase__ : Union[str, Any] = 1
# recursively check all four directions
if (
run_maze(SCREAMING_SNAKE_CASE_ , i + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or run_maze(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , j + 1 , SCREAMING_SNAKE_CASE_ )
or run_maze(SCREAMING_SNAKE_CASE_ , i - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or run_maze(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , j - 1 , SCREAMING_SNAKE_CASE_ )
):
return True
lowerCAmelCase__ : Dict = 0
return False
return False
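# Illustrative driver (not part of the original module; the helper name is
# hypothetical): run the recursive solver directly on a small grid where 0
# marks an open cell and 1 a wall. ``solutions`` is filled in place with the
# path from (0, 0) to (size - 1, size - 1).
def _demo_run_maze() -> None:
    maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solutions = [[0] * len(maze) for _ in range(len(maze))]
    if run_maze(maze, 0, 0, solutions):
        for row in solutions:
            print(row)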
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# the new (non-default) attribute survives the save / load round trip
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
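# Standalone sketch (not one of the original tests; the helper name is
# hypothetical) of the ``GenerationConfig.update`` contract exercised above:
# valid attributes are applied in place and unknown keys are handed back.
def _update_contract_sketch() -> None:
    config = GenerationConfig()
    unused = config.update(max_new_tokens=1_024, foo="bar")
    assert config.max_new_tokens == 1_024
    assert unused == {"foo": "bar"}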
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
| 307
| 1
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class A__ ( __magic_name__ ):
lowercase = 'EncodecFeatureExtractor'
lowercase = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Optional[Any] , a : str , a : int ):
'''simple docstring'''
super().__init__(a , a )
lowerCAmelCase__ : List[str] = self.feature_extractor
lowerCAmelCase__ : List[Any] = False
def _lowerCamelCase ( self : Any , a : Union[str, Any]=None , a : List[str]=None , a : List[Any]=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=a , language=a , no_timestamps=a )
def __call__( self : Union[str, Any] , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a , **a )
lowerCAmelCase__ : Any = kwargs.pop('audio' , a )
lowerCAmelCase__ : int = kwargs.pop('sampling_rate' , a )
lowerCAmelCase__ : Optional[int] = kwargs.pop('text' , a )
if len(a ) > 0:
lowerCAmelCase__ : List[str] = args[0]
lowerCAmelCase__ : Dict = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
lowerCAmelCase__ : List[str] = self.tokenizer(a , **a )
if audio is not None:
lowerCAmelCase__ : Union[str, Any] = self.feature_extractor(a , *a , sampling_rate=a , **a )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase__ : Dict = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
lowerCAmelCase__ : Tuple = audio_inputs['padding_mask']
return inputs
def _lowerCamelCase ( self : Any , *a : List[Any] , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = kwargs.pop('audio' , a )
lowerCAmelCase__ : int = kwargs.pop('padding_mask' , a )
if len(a ) > 0:
lowerCAmelCase__ : str = args[0]
lowerCAmelCase__ : int = args[1:]
if audio_values is not None:
return self._decode_audio(a , padding_mask=a )
else:
return self.tokenizer.batch_decode(*a , **a )
def _lowerCamelCase ( self : Optional[int] , *a : Tuple , **a : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
def _lowerCamelCase ( self : Union[str, Any] , a : Union[str, Any] , a : Optional[np.ndarray] = None ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = to_numpy(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = audio_values.shape
if padding_mask is None:
return list(a )
lowerCAmelCase__ : Optional[int] = to_numpy(a )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase__ : List[str] = seq_len - padding_mask.shape[-1]
lowerCAmelCase__ : Union[str, Any] = 1 - self.feature_extractor.padding_value
lowerCAmelCase__ : Optional[int] = np.pad(a , ((0, 0), (0, difference)) , 'constant' , constant_values=a )
lowerCAmelCase__ : str = audio_values.tolist()
for i in range(a ):
lowerCAmelCase__ : int = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase__ : int = sliced_audio.reshape(a , -1 )
return audio_values
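# Minimal numpy sketch (illustrative only, single-channel case; the helper
# name and the ``padding_value`` default are assumptions) of the trimming
# performed above: pad the mask with the *non*-padding token, then keep only
# the samples whose mask entries differ from ``padding_value``.
def _trim_sketch(audio: np.ndarray, padding_mask: np.ndarray, padding_value: float = 0.0) -> np.ndarray:
    difference = audio.shape[-1] - padding_mask.shape[-1]
    mask = np.pad(padding_mask, (0, difference), "constant", constant_values=1 - padding_value)
    return audio[mask != padding_value]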
| 307
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class A__ :
lowercase = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure manually passing the image embeddings yields an identical result
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# The check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = KandinskyImgaImgPipeline
lowercase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
lowercase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
lowercase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase = False
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
lowerCAmelCase__ : str = MultilingualCLIP(a )
lowerCAmelCase__ : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = {
'in_channels': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ : List[str] = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.dummy_text_encoder
lowerCAmelCase__ : List[str] = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_unet
lowerCAmelCase__ : str = self.dummy_movq
lowerCAmelCase__ : List[str] = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCAmelCase__ : Dict = DDIMScheduler(**a )
lowerCAmelCase__ : Any = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _lowerCamelCase ( self : str , a : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
# create init_image
lowerCAmelCase__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : int = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((256, 256) )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : int = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Union[str, Any] = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(a ) )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Tuple = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
lowerCAmelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase__ : List[Any] = 'A red cartoon frog, 4k'
lowerCAmelCase__ : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
lowerCAmelCase__ : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
lowerCAmelCase__ : str = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ : List[Any] = pipeline(
a , image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
lowerCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a )
| 307
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if i >= h:
return
# If the first element is larger than the last one, swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCAmelCase__ : Union[str, Any] = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
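# Illustrative note (not in the original script): the three overlapping
# two-thirds recursions give stooge sort a running time of
# O(n ** (log 3 / log 1.5)), roughly O(n ** 2.71).
# Expected behaviour of the wrapper driven in ``__main__`` below:
# stooge_sort([2, 4, 5, 3, 1])  # -> [1, 2, 3, 4, 5]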
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : Optional[int] = [0] * len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[Any] = [1] * len(SCREAMING_SNAKE_CASE_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
if indegree[i] == 0:
queue.append(SCREAMING_SNAKE_CASE_ )
while queue:
lowerCAmelCase__ : int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCAmelCase__ : Optional[int] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(SCREAMING_SNAKE_CASE_ )
print(max(SCREAMING_SNAKE_CASE_ ) )
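# Worked example (illustrative): for the adjacency list defined below, the
# longest chain is 0 -> 2 -> 5 -> 6 -> 7, i.e. five vertices, so the call to
# ``longest_distance`` at the bottom of the file prints 5.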
# Adjacency list of Graph
lowerCamelCase__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
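# Note (illustrative): at runtime the module is replaced by the ``_LazyModule``
# above, which imports the names listed in ``_import_structure`` only on first
# attribute access; the ``TYPE_CHECKING`` branch exists so that static type
# checkers still see the real imports.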
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCAmelCase__ : Tuple = [[float('inf' ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(SCREAMING_SNAKE_CASE_ ):
# looping through rows of graph array
for i in range(SCREAMING_SNAKE_CASE_ ):
# looping through columns of graph array
for j in range(SCREAMING_SNAKE_CASE_ ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowerCAmelCase__ : int = dist[i][k] + dist[k][j]
_print_dist(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return dist, v
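# Non-interactive sketch (not in the original script; the helper name is
# hypothetical): reproduces the sample session documented in the comments at
# the bottom of this file without the interactive prompts.
def _sample_run() -> None:
    inf = float("inf")
    sample = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    floyd_warshall(sample, 3)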
if __name__ == "__main__":
lowerCamelCase__ = int(input("""Enter number of vertices: """))
lowerCamelCase__ = int(input("""Enter number of edges: """))
lowerCamelCase__ = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
lowerCamelCase__ = 0.0
# src and dst are vertex indices and must lie within range(v);
# out-of-range input will raise an IndexError
for i in range(e):
print("""\nEdge """, i + 1)
lowerCamelCase__ = int(input("""Enter source:"""))
lowerCamelCase__ = int(input("""Enter destination:"""))
lowerCamelCase__ = float(input("""Enter weight:"""))
lowerCamelCase__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output for the vertex, edge and src, dst, weight inputs:
# 0 INF INF
# INF 0 2
# INF 1 0
| 307
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
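# Summary note (illustrative): the helpers exercised above dispatch on the
# input type, so a single call site handles numpy arrays, torch tensors, tf
# tensors and jax arrays alike, returning the same framework it was given.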
| 307
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
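# Note (illustrative): the property above is the feature extractor's overall
# downsampling factor; with the default strides (5, 2, 2, 2, 2, 2, 2) it is
# 5 * 2 ** 6 = 320 input samples per output frame.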
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
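# Note (illustrative): the ``del`` statements above keep ``platform``,
# ``pyarrow`` and ``version`` out of the public package namespace once the
# runtime compatibility checks have passed.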
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase__ = concatenate_datasets
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadManager
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 307
| 1
|
# Use DFS to find an Eulerian path / circuit traversal.
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Any:
lowerCAmelCase__ : int = (path or []) + [u]
for v in graph[u]:
if not visited_edge[u][v]:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = True, True
lowerCAmelCase__ : List[Any] = dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return path
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : str = -1
for i in range(SCREAMING_SNAKE_CASE_ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCAmelCase__ : List[str] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowerCAmelCase__ : Optional[int] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Dict = check_circuit_or_path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
lowerCAmelCase__ : str = 1
if check == 2:
lowerCAmelCase__ : Any = odd_node
print('graph has an Euler path' )
if check == 1:
print('graph has an Euler cycle' )
lowerCAmelCase__ : List[Any] = dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
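# Note (illustrative): ``check_circuit_or_path`` returns 1 when no vertex has
# odd degree (Euler circuit), 2 when exactly two vertices do (Euler path,
# starting the DFS at one of them), and 3 otherwise (no Euler traversal).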
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCAmelCase__ : str = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCAmelCase__ : Union[str, Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCAmelCase__ : Tuple = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCAmelCase__ : Optional[Any] = {
1: [],
2: []
# all degrees are zero
}
lowerCAmelCase__ : Optional[int] = 10
check_euler(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_euler(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_euler(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_euler(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_euler(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 307
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
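# A minimal usage sketch (not part of the original test module), assuming the
# public diffusers API and the harmonai/maestro-150k checkpoint exercised by
# the slow tests above. Guarded so importing the test module stays cheap.
if __name__ == "__main__":
    import torch as _torch
    from diffusers import DanceDiffusionPipeline as _DanceDiffusionPipeline

    _pipe = _DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
    _pipe = _pipe.to('cuda' if _torch.cuda.is_available() else 'cpu')
    _generator = _torch.manual_seed(0)  # fixed seed for reproducible audio
    _output = _pipe(generator=_generator, num_inference_steps=100, audio_length_in_s=4.096)
    _audio = _output.audios[0]  # numpy array of shape (channels, samples)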
| 307
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class A__ ( __magic_name__ ):
lowercase = 'deta'
lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Dict , a : Optional[Any]=None , a : List[str]=900 , a : Optional[Any]=2_048 , a : Tuple=6 , a : Union[str, Any]=2_048 , a : int=8 , a : Optional[int]=6 , a : Dict=1_024 , a : List[str]=8 , a : Dict=0.0 , a : Optional[int]=True , a : Optional[int]="relu" , a : Optional[int]=256 , a : str=0.1 , a : Union[str, Any]=0.0 , a : List[str]=0.0 , a : Any=0.0_2 , a : List[str]=1.0 , a : str=True , a : Dict=False , a : str="sine" , a : str=5 , a : Dict=4 , a : Dict=4 , a : Dict=True , a : Any=300 , a : str=True , a : List[str]=True , a : Dict=1 , a : int=5 , a : Any=2 , a : List[Any]=1 , a : Optional[int]=1 , a : List[str]=5 , a : Union[str, Any]=2 , a : Tuple=0.1 , a : Dict=0.2_5 , **a : Optional[int] , ):
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase__ : str = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(a , a ):
lowerCAmelCase__ : List[str] = backbone_config.pop('model_type' )
lowerCAmelCase__ : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase__ : Dict = config_class.from_dict(a )
lowerCAmelCase__ : Optional[Any] = backbone_config
lowerCAmelCase__ : Optional[int] = num_queries
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : List[Any] = d_model
lowerCAmelCase__ : Any = encoder_ffn_dim
lowerCAmelCase__ : List[Any] = encoder_layers
lowerCAmelCase__ : List[str] = encoder_attention_heads
lowerCAmelCase__ : Optional[Any] = decoder_ffn_dim
lowerCAmelCase__ : List[Any] = decoder_layers
lowerCAmelCase__ : List[Any] = decoder_attention_heads
lowerCAmelCase__ : List[str] = dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : List[Any] = activation_dropout
lowerCAmelCase__ : Optional[Any] = activation_function
lowerCAmelCase__ : int = init_std
lowerCAmelCase__ : List[str] = init_xavier_std
lowerCAmelCase__ : str = encoder_layerdrop
lowerCAmelCase__ : List[str] = auxiliary_loss
lowerCAmelCase__ : str = position_embedding_type
# deformable attributes
lowerCAmelCase__ : Any = num_feature_levels
lowerCAmelCase__ : Dict = encoder_n_points
lowerCAmelCase__ : Dict = decoder_n_points
lowerCAmelCase__ : str = two_stage
lowerCAmelCase__ : Tuple = two_stage_num_proposals
lowerCAmelCase__ : str = with_box_refine
lowerCAmelCase__ : Dict = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase__ : List[str] = class_cost
lowerCAmelCase__ : Dict = bbox_cost
lowerCAmelCase__ : Tuple = giou_cost
# Loss coefficients
lowerCAmelCase__ : Union[str, Any] = mask_loss_coefficient
lowerCAmelCase__ : Union[str, Any] = dice_loss_coefficient
lowerCAmelCase__ : str = bbox_loss_coefficient
lowerCAmelCase__ : List[str] = giou_loss_coefficient
lowerCAmelCase__ : Union[str, Any] = eos_coefficient
lowerCAmelCase__ : List[str] = focal_alpha
super().__init__(is_encoder_decoder=a , **a )
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return self.d_model
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : Optional[Any] = self.backbone_config.to_dict()
lowerCAmelCase__ : str = self.__class__.model_type
return output
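# Illustrative usage sketch (assumes this file mirrors transformers'
# DetaConfig; the class name above is mangled): the attribute_map lets
# generic code read hidden_size / num_attention_heads, which resolve to the
# DETR-style d_model / encoder_attention_heads fields.
if __name__ == "__main__":
    from transformers import DetaConfig

    config = DetaConfig()  # falls back to a default ResNet backbone, as in __init__ above
    assert config.hidden_size == config.d_model
    assert config.num_attention_heads == config.encoder_attention_heads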
| 307
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
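# A minimal sketch of the placeholder pattern above (_Dummy is a hypothetical
# name, not part of the original module): the package imports cleanly even
# without the optional backends, and requires_backends raises an informative
# ImportError only when a missing backend's class is actually used.
if __name__ == "__main__":
    class _Dummy(metaclass=DummyObject):
        _backends = ['torch', 'transformers', 'onnx']

        def __init__(self, *args, **kwargs):
            requires_backends(self, ['torch', 'transformers', 'onnx'])

    try:
        _Dummy()
        print('all backends available')
    except ImportError as err:  # raised when torch/transformers/onnx are missing
        print(err)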
| 307
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class A__ ( __magic_name__ ):
lowercase = 'gptsan-japanese'
lowercase = [
'past_key_values',
]
lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Union[str, Any] , a : str=36_000 , a : Optional[Any]=1_280 , a : Union[str, Any]=1_024 , a : Any=8_192 , a : Dict=4_096 , a : List[str]=128 , a : int=10 , a : Dict=0 , a : int=16 , a : Dict=16 , a : str=128 , a : int=0.0 , a : Optional[int]=1E-5 , a : Union[str, Any]=False , a : Any=0.0 , a : Any="float32" , a : List[str]=False , a : Optional[int]=False , a : Dict=False , a : Dict=0.0_0_2 , a : str=False , a : List[str]=True , a : Dict=35_998 , a : Union[str, Any]=35_995 , a : Dict=35_999 , **a : Union[str, Any] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Tuple = d_model
lowerCAmelCase__ : Optional[Any] = d_ff
lowerCAmelCase__ : str = d_ext
lowerCAmelCase__ : str = d_spout
lowerCAmelCase__ : Any = num_switch_layers
lowerCAmelCase__ : List[Any] = num_ext_layers
lowerCAmelCase__ : List[str] = num_switch_layers + num_ext_layers
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = num_experts
lowerCAmelCase__ : int = expert_capacity
lowerCAmelCase__ : List[str] = dropout_rate
lowerCAmelCase__ : List[Any] = layer_norm_epsilon
lowerCAmelCase__ : Dict = router_bias
lowerCAmelCase__ : Union[str, Any] = router_jitter_noise
lowerCAmelCase__ : Tuple = router_dtype
lowerCAmelCase__ : Tuple = router_ignore_padding_tokens
lowerCAmelCase__ : Optional[int] = output_hidden_states
lowerCAmelCase__ : Union[str, Any] = output_attentions
lowerCAmelCase__ : str = initializer_factor
lowerCAmelCase__ : Optional[Any] = output_router_logits
lowerCAmelCase__ : Optional[Any] = use_cache
super().__init__(
separator_token_id=a , pad_token_id=a , eos_token_id=a , **a , )
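# Quick sanity check (illustrative; assumes this file mirrors transformers'
# GPTSanJapaneseConfig): the attribute_map above routes num_hidden_layers to
# num_layers, which __init__ derives as num_switch_layers + num_ext_layers
# (10 + 0 with the defaults above).
if __name__ == "__main__":
    from transformers import GPTSanJapaneseConfig

    config = GPTSanJapaneseConfig()
    assert config.num_hidden_layers == config.num_switch_layers + config.num_ext_layers == 10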
| 307
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids with them
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
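# A minimal sketch of the RoPE-scaling comparison the parameterized test above
# performs (illustrative; assumes LlamaConfig's rope_scaling field and a
# torch-enabled environment). "linear" rescales every position, so even short
# inputs diverge from the unscaled model; "dynamic" only changes outputs past
# the original max_position_embeddings, which is exactly what the test asserts.
if __name__ == "__main__":
    config = LlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
                         num_attention_heads=4, intermediate_size=64,
                         max_position_embeddings=64)
    original_model = LlamaModel(config)
    config.rope_scaling = {'type': 'linear', 'factor': 10.0}
    scaled_model = LlamaModel(config)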
| 307
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCAmelCase__ ( ) -> Union[str, Any]:
lowerCAmelCase__ : str = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=SCREAMING_SNAKE_CASE_ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=SCREAMING_SNAKE_CASE_ )
return parser.parse_args()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : Optional[int] = parse_args()
# Import training_script as a module.
lowerCAmelCase__ : List[str] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCAmelCase__ : List[Any] = script_fpath.stem
lowerCAmelCase__ : Dict = importlib.import_module(SCREAMING_SNAKE_CASE_ )
# Patch sys.argv
lowerCAmelCase__ : Optional[int] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
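# Usage sketch (illustrative; the file name xla_spawn.py is an assumption):
# if this launcher is saved as xla_spawn.py, a training script is launched as
#
#     python xla_spawn.py --num_cores 8 my_train.py --lr 1e-4
#
# Because the launcher calls xmp.spawn(mod._mp_fn, ...), the training script
# must define a _mp_fn entry point that receives the local process ordinal:
#
#     def _mp_fn(index):
#         # index: XLA process ordinal; sys.argv has already been patched
#         # to include --tpu_num_cores
#         ...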
| 307
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
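# Worked example of the property above (illustrative, using the default
# conv_stride=(5, 2, 2, 2, 2, 2, 2)): the feature extractor downsamples by
# 5 * 2**6 = 320, i.e. one encoder frame per 320 raw samples (20 ms at 16 kHz).
if __name__ == "__main__":
    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320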
| 307
| 1
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = OpenAIGPTTokenizer
lowercase = OpenAIGPTTokenizerFast
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCAmelCase__ : List[Any] = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : int = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(a ) )
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any] ):
'''simple docstring'''
return "lower newer", "lower newer"
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase__ : Optional[int] = 'lower'
lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>']
lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Optional[int] = tokens + ['<unk>']
lowerCAmelCase__ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _lowerCamelCase ( self : Dict , a : List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(a , **a )
# Simple input
lowerCAmelCase__ : Optional[int] = 'This is a simple input'
lowerCAmelCase__ : int = ['This is a simple input 1', 'This is a simple input 2']
lowerCAmelCase__ : int = ('This is a simple input', 'This is a pair')
lowerCAmelCase__ : List[str] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A__ ( __magic_name__ ):
pass
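# A hand-worked sketch of the fixture's BPE merges (simplified for
# illustration: merges are applied in file order rather than by rank),
# showing why "lower" tokenizes to ['low', 'er</w>'] in the test above.
if __name__ == "__main__":
    merges = [('l', 'o'), ('lo', 'w'), ('e', 'r</w>')]
    tokens = ['l', 'o', 'w', 'e', 'r</w>']  # "lower" with the end-of-word marker
    for first, second in merges:
        i = 0
        while i < len(tokens) - 1:
            if (tokens[i], tokens[i + 1]) == (first, second):
                tokens[i : i + 2] = [first + second]
            else:
                i += 1
    print(tokens)  # ['low', 'er</w>']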
| 307
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
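# Worked example of the cutoff bookkeeping in __init__ above (the numbers are
# illustrative, not from the original file): with n_token=30000 and
# cutoffs=[2000, 10000], the head softmax covers the 2000 most frequent
# tokens plus one logit per tail cluster.
if __name__ == "__main__":
    n_token, cutoffs = 30_000, [2_000, 10_000]
    cutoffs = cutoffs + [n_token]          # [2000, 10000, 30000]
    cutoff_ends = [0] + cutoffs            # bucket boundaries per cluster
    shortlist_size = cutoffs[0]            # 2000 head tokens
    n_clusters = len(cutoffs) - 1          # 2 tail clusters
    head_size = shortlist_size + n_clusters
    assert (cutoff_ends[-1], head_size) == (30_000, 2_002)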
| 307
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class A__ :
def __init__( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : list[Any] = []
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : int = 0
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.head == self.tail
def _lowerCamelCase ( self : Tuple , a : Any ):
'''simple docstring'''
self.data.append(a )
lowerCAmelCase__ : int = self.tail + 1
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.data[self.head]
lowerCAmelCase__ : Optional[int] = self.head + 1
return ret
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.tail - self.head
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class A__ :
def __init__( self : List[Any] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = data
lowerCAmelCase__ : MyNode | None = None
lowerCAmelCase__ : MyNode | None = None
lowerCAmelCase__ : int = 1
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return self.data
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.left
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.right
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.height
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = data
def _lowerCamelCase ( self : List[Any] , a : MyNode | None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = node
def _lowerCamelCase ( self : List[str] , a : MyNode | None ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = node
def _lowerCamelCase ( self : List[Any] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = height
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
if node is None:
return 0
return node.get_height()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
if a > b:
return a
return b
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> MyNode:
print('left rotation node:' , node.get_data() )
lowerCAmelCase__ : Optional[Any] = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(SCREAMING_SNAKE_CASE_ )
return ret
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> MyNode:
print('right rotation node:' , node.get_data() )
lowerCAmelCase__ : str = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(SCREAMING_SNAKE_CASE_ )
return ret
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> MyNode:
lowerCAmelCase__ : Any = node.get_left()
assert left_child is not None
node.set_left(left_rotation(SCREAMING_SNAKE_CASE_ ) )
return right_rotation(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> MyNode:
lowerCAmelCase__ : Dict = node.get_right()
assert right_child is not None
node.set_right(right_rotation(SCREAMING_SNAKE_CASE_ ) )
return left_rotation(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> MyNode | None:
if node is None:
return MyNode(SCREAMING_SNAKE_CASE_ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , SCREAMING_SNAKE_CASE_ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
lowerCAmelCase__ : Optional[int] = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowerCAmelCase__ : Tuple = right_rotation(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : str = lr_rotation(SCREAMING_SNAKE_CASE_ )
else:
node.set_right(insert_node(node.get_right() , SCREAMING_SNAKE_CASE_ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
lowerCAmelCase__ : Tuple = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowerCAmelCase__ : Tuple = rl_rotation(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Union[str, Any] = left_rotation(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(SCREAMING_SNAKE_CASE_ )
return node
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
while True:
lowerCAmelCase__ : Optional[int] = root.get_right()
if right_child is None:
break
lowerCAmelCase__ : Optional[Any] = right_child
return root.get_data()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
while True:
lowerCAmelCase__ : Tuple = root.get_left()
if left_child is None:
break
lowerCAmelCase__ : List[Any] = left_child
return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # replace with the leftmost node of the right subtree, then delete
            # that node from the right subtree
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(
        self,
    ) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_files2failed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        test_failures[i][0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
def base16_encode(data: bytes) -> str:
    """
    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Turn each byte into its uppercase two-digit hex representation and join.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
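

# Round-trip sketch (illustrative only): the two helpers above are inverses
# on the uppercase-hex alphabet.
#
#     assert base16_decode(base16_encode(b"some bytes")) == b"some bytes"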
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
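
# Example invocation (a sketch; the flag names assume the standard
# TensorFlowBenchmarkArguments fields and are not defined in this file):
#
#     python run_benchmark_tf.py --models bert-base-uncased \
#         --batch_sizes 8 --sequence_lengths 128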
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
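
# Usage note (sketch): with this lazy setup, an import such as
#     from transformers.models.blip import BlipProcessor
# resolves through `_LazyModule`, so heavy submodules like `modeling_blip`
# are only actually imported on first attribute access.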
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm for finding a pattern within a piece of
    text, with complexity O(n + m).
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
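
# For instance (sketch): get_failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2];
# failure[k] is the length of the longest proper prefix of pattern[: k + 1]
# that is also a suffix of it (exercised as Test 5 below).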
if __name__ == "__main__":
# Test 1)
lowerCamelCase__ = """abc1abc12"""
lowerCamelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCamelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCamelCase__ = """ABABX"""
lowerCamelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
lowerCamelCase__ = """AAAB"""
lowerCamelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
lowerCamelCase__ = """abcdabcy"""
lowerCamelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
lowerCamelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
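
# Shape sketch (illustrative): betas_for_alpha_bar(1000) yields a length-1000
# float32 tensor of betas following the "cosine" (squaredcos_cap_v2) schedule
# consumed by the scheduler below.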


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler implementing Heun steps (Algorithm 2 from Karras et al. 2022): a second order sampler where each
    denoising step runs one Euler (first order) stage followed by one correction (second order) stage.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        # Scale the denoising model input by the current sigma; ensures
        # interchangeability with schedulers that need input scaling.
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
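

# Minimal denoising-loop sketch (illustrative only; `model` stands in for any
# noise-prediction network and is not defined in this file):
#
#     scheduler = HeunDiscreteScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample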
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
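
# Input format sketch (illustrative): each line of the original entity vocab
# file is a JSON object like
#     {"id": 5, "entities": [["Tokyo", "en"], ["東京", "ja"]]}
# which the helper above flattens to {"en:Tokyo": 5, "ja:東京": 5}, mapping
# special tokens such as "[MASK]" directly to their id.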
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """
        Segment Tree constructor, it works just with commutative combiner.
        """
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """
        Update an element in log(N) time.
        """
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """
        Get range query value in log(N) time.
        """
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
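

# Index-layout sketch (my annotation, not part of the original): for
# arr = [a, b, c, d] (N = 4) the backing list is
#     st = [None, f(f(a,b), f(c,d)), f(a,b), f(c,d), a, b, c, d]
# i.e. leaf i lives at st[N + i] and every internal node st[p] combines
# st[2 * p] and st[2 * p + 1]; update and query walk parent links via p // 2.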
if __name__ == "__main__":
from functools import reduce
lowerCamelCase__ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
lowerCamelCase__ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
lowerCamelCase__ = SegmentTree(test_array, min)
lowerCamelCase__ = SegmentTree(test_array, max)
lowerCamelCase__ = SegmentTree(test_array, lambda a, b: a + b)
def lowerCAmelCase__ ( ) -> None:
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ : str = reduce(SCREAMING_SNAKE_CASE_ , test_array[i : j + 1] )
lowerCAmelCase__ : str = reduce(SCREAMING_SNAKE_CASE_ , test_array[i : j + 1] )
lowerCAmelCase__ : Dict = reduce(lambda SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert max_range == max_segment_tree.query(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert sum_range == sum_segment_tree.query(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
test_all_segments()
for index, value in test_updates.items():
lowerCamelCase__ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
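
# Quick usage sketch (illustrative): build is O(n), and both `query` and
# `update` touch at most two nodes per tree level, so they run in O(log n).
#
#     st = SegmentTree([5, 1, 3], min)
#     st.query(0, 2)   # -> 1
#     st.update(1, 7)  # array becomes [5, 7, 3]
#     st.query(0, 2)   # -> 3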
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack each (story, cont1, cont2, label) tuple into padded Transformer input tensors."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
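
# Shape sketch (illustrative): for a split of 2 examples with input_len = 6,
# `pre_process_datasets` yields a tuple of tensors shaped
#   input_ids    (2, 2, 6)  token ids for (story + cont1) and (story + cont2)
#   mc_token_ids (2, 2)     index of the classification token in each alternative
#   lm_labels    (2, 2, 6)  same tokens, -100 elsewhere (ignored by the LM loss)
#   mc_labels    (2,)       index (0 or 1) of the correct continuation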
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object (string, int, or list thereof)."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
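
# Example invocation (illustrative; the ROCStories CSV paths are assumptions):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ../log \
#     --train_batch_size 16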
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was lost in the source; a generic name
    # is used here. The pipeline is the standard ImageNet preprocessing:
    # resize -> center-crop -> rescale -> normalize.

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
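
# Usage sketch (illustrative; assumes a PIL image and the PyTorch backend):
#
#     from PIL import Image
#     processor = SimpleImageProcessor()
#     image = Image.open("cat.png")
#     batch = processor(images=image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])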
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
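
# Usage sketch (illustrative): `dep_version_check("tokenizers")` looks up the
# pinned requirement string for that package in `deps` (e.g. "tokenizers>=...")
# and raises if the installed version does not satisfy it.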
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of `a` modulo `m` via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
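
# Quick check (illustrative): 3 * 4 == 12 == 1 (mod 11), so the inverse of 3 mod 11 is 4.
if __name__ == "__main__":
    assert find_mod_inverse(3, 11) == 4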
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
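
# Pattern note (illustrative): every full-loop test above drives the scheduler
# exactly the way a diffusion pipeline would:
#
#     scheduler.set_timesteps(num_inference_steps)
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample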
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
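
# The spawned command is equivalent to (illustrative):
#
#   python <test_dir>/xla_spawn.py --num_cores 8 <accelerate>/test_utils/scripts/test_script.py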
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Get raw characters from stdin, handling Windows scan-code prefixes."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a single character or key code, translating escape sequences for arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
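
# Minimal interactive demo (illustrative; requires a real terminal):
if __name__ == "__main__":
    key = get_character()
    if isinstance(key, str) and len(key) == 1 and ord(key) & ARROW_KEY_FLAG:
        print("arrow key code:", ord(key) - ARROW_KEY_FLAG)
    else:
        print("key:", key)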
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
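
# Illustrative check once uploaded (the repo id comes from the comment above):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")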
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase hexadecimal (base16) string."""
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
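
# Round-trip example (illustrative):
if __name__ == "__main__":
    encoded = base16_encode(b"Hello World!")
    assert encoded == "48656C6C6F20576F726C6421"
    assert base16_decode(encoded) == b"Hello World!"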
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
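
# Why the two prints agree (Fermat's little theorem): for prime p and
# gcd(b, p) == 1, b**(p - 2) is the modular inverse of b, so dividing by b
# modulo p is the same as multiplying by b**(p - 2). Quick check (illustrative):
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1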
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all size-`k` combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    # Backtracking: extend `current_list` with each candidate, recurse, then undo.
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
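    # Expected output for n=4, k=2 (illustrative):
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4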
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
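
# Sanity check (illustrative): the standard normal density peaks at
# 1 / sqrt(2 * pi) ~= 0.3989 when x == mu.
if __name__ == "__main__":
    assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12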
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
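
# Usage sketch (illustrative): a GenerationConfig travels independently of the
# model weights, so decoding defaults can be saved and shared on their own:
#
#     from transformers import GenerationConfig
#     config = GenerationConfig(do_sample=True, temperature=0.7)
#     config.save_pretrained("my-model-dir")
#     reloaded = GenerationConfig.from_pretrained("my-model-dir")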
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct: dict):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
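
# Usage sketch (assumption: this is the formatter `datasets` registers for the
# "torch" format key, so it is normally reached through `with_format`):
#
#     import datasets
#     ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # -> tensor([1, 2])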
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing the image embeddings manually gives an identical result
        assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
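        # Both runs reuse the same pre-computed decoder and super-res latents, so the
        # only difference could come from the image-vs-embedding input path; equality
        # shows the two entry points are interchangeable.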
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
def add(first: int, second: int) -> int:
    """
    Add two integers using only bitwise operators.

    >>> add(3, 5)
    8
    """
    while second != 0:
        carry = first & second  # common set bits generate a carry
        first ^= second  # add the bits that produce no carry
        second = carry << 1  # shift the carry so it is added on the next pass
    return first
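# Worked example of the carry propagation for add(5, 3):
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0b000, first=0b1000 (= 8), second=0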
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
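# Note: the three overlapping recursive calls on 2/3 of the range give stooge
# sort a running time of O(n^(log 3 / log 1.5)) ~ O(n^2.71), slower than even
# bubble sort; the algorithm is of pedagogical interest only.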
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
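    # Example invocation (the script filename below is hypothetical):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./karlo-image-variations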
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
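# Note: at import time only the `_import_structure` table of strings is built,
# so importing this package stays cheap; `_LazyModule` resolves the real
# submodules on first attribute access, and the try/except blocks above simply
# omit the entries whose optional backend (sentencepiece, torchaudio,
# TensorFlow or PyTorch) is not installed.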
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the line perpendicular to the tangent at
    # (point_x, point_y) on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2 * theta) and c2 = cos(2 * theta), where tan(theta) = normal_gradient
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
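# Geometry note (Project Euler 144, a laser reflecting inside the ellipse
# 4x^2 + y^2 = 100): implicit differentiation gives a tangent slope of -4x/y,
# hence the normal slope y/(4x) used above; reflecting the incoming line about
# that normal is exactly the rotation by 2*theta that s2 and c2 encode.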
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A__ ( __magic_name__ ):
lowercase = 'perceiver'
def __init__( self : List[Any] , a : Dict=256 , a : List[Any]=1_280 , a : Dict=768 , a : Union[str, Any]=1 , a : Union[str, Any]=26 , a : Tuple=8 , a : str=8 , a : List[str]=None , a : str=None , a : List[Any]="kv" , a : int=1 , a : Any=1 , a : List[Any]="gelu" , a : Optional[Any]=0.1 , a : List[str]=0.0_2 , a : List[str]=1E-12 , a : List[str]=True , a : Optional[int]=262 , a : Dict=2_048 , a : Optional[Any]=56 , a : Dict=[368, 496] , a : List[str]=16 , a : int=1_920 , a : Any=16 , a : List[Any]=[1, 16, 224, 224] , **a : List[str] , ):
'''simple docstring'''
super().__init__(**a )
lowerCAmelCase__ : Dict = num_latents
lowerCAmelCase__ : List[str] = d_latents
lowerCAmelCase__ : Optional[int] = d_model
lowerCAmelCase__ : List[str] = num_blocks
lowerCAmelCase__ : Dict = num_self_attends_per_block
lowerCAmelCase__ : Any = num_self_attention_heads
lowerCAmelCase__ : Optional[Any] = num_cross_attention_heads
lowerCAmelCase__ : Optional[Any] = qk_channels
lowerCAmelCase__ : Optional[Any] = v_channels
lowerCAmelCase__ : int = cross_attention_shape_for_attention
lowerCAmelCase__ : int = self_attention_widening_factor
lowerCAmelCase__ : int = cross_attention_widening_factor
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Optional[Any] = use_query_residual
# masked language modeling attributes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Optional[int] = max_position_embeddings
# image classification attributes
lowerCAmelCase__ : Tuple = image_size
# flow attributes
lowerCAmelCase__ : Dict = train_size
# multimodal autoencoding attributes
lowerCAmelCase__ : Union[str, Any] = num_frames
lowerCAmelCase__ : Optional[Any] = audio_samples_per_frame
lowerCAmelCase__ : int = samples_per_patch
lowerCAmelCase__ : str = output_shape
class A__ ( __magic_name__ ):
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return 1E-4
def _lowerCamelCase ( self : Optional[Any] , a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a : int = -1 , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , a : int = 3 , a : int = 40 , a : int = 40 , ):
'''simple docstring'''
if isinstance(a , a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : List[str] = preprocessor.num_special_tokens_to_add(a )
lowerCAmelCase__ : List[str] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : Dict = [' '.join(['a'] ) * seq_length] * batch_size
lowerCAmelCase__ : int = dict(preprocessor(a , return_tensors=a ) )
            inputs['inputs'] = inputs.pop('input_ids' )
return inputs
elif isinstance(a , a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Any = compute_effective_axis_dimension(a , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCAmelCase__ : str = self._generate_dummy_images(a , a , a , a )
lowerCAmelCase__ : List[Any] = dict(preprocessor(images=a , return_tensors=a ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
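    # Note on the fixed dummy dimensions above: when an axis is dynamic (-1), the
    # exporter deliberately traces with a small fixed size (2 samples, 8 tokens)
    # because tracing with a size of 1 would let ONNX constant-fold that axis away.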
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
try:
hide_cursor()
yield
finally:
show_cursor()
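# Minimal usage sketch (`hidden_cursor` is the name chosen here for the context
# manager above, since the original name is not recoverable from this snippet):
#
#   with hidden_cursor():
#       run_long_terminal_task()  # hypothetical; the cursor is restored on exit
#
# The `finally` block guarantees the cursor reappears even if the body raises.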
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[int] = UNet1DModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
        lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
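        # Note: the expected sums/means are branched per device because the SDE
        # sampler draws its Brownian noise through device-specific kernels, so CPU,
        # CUDA and MPS each reproduce a different (but fixed) trajectory for the
        # same `noise_sampler_seed`.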
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
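# Pushing `reversed(graph[v])` makes the explicit stack visit neighbours in the
# same left-to-right order a recursive DFS would; without `reversed` the
# explored set is identical but nodes are discovered in mirrored order.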
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
        lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to next input_ids and the attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
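        # Feeding the cached past_key_values plus only the new tokens must reproduce
        # the hidden states of a full forward pass over the concatenated sequence;
        # only a random slice is compared to keep the check cheap.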
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
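        # Hedged usage sketch (illustrative, assuming the public `transformers` API): the
        # scaling exercised above is requested through the `rope_scaling` config knob, e.g.
        #   config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})
        #   model = LlamaModel(config)  # extrapolates past config.max_position_embeddings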
@require_torch
class A__ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same; once we fix the instabilities, we will update! This will also be a `too_slow` test.' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
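        # Hedged note: with do_sample=False, generate() performs plain greedy decoding, so
        # the produced string is deterministic and can be compared to the expected completion
        # verbatim; top_p and temperature are ignored when sampling is disabled.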
| 307
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
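# Hedged note: the _LazyModule indirection above defers the heavy torch/TF imports until an
# attribute (e.g. EfficientFormerModel) is first accessed; the TYPE_CHECKING branch exists
# only so static type checkers still resolve the real symbols.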
| 307
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
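        # Hedged note: the product of the conv strides is the hop between raw-audio samples
        # and encoder frames; with the default strides (5, 2, 2, 2, 2, 2, 2) one frame
        # covers 5 * 2**6 = 320 input samples.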
| 307
| 1
|
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers; 2 is added by hand.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    # Number of reduced proper fractions with denominator <= limit (Project Euler 72).
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 307
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
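# Hedged aside: PyTorch ships the same head/tail-cluster idea as nn.AdaptiveLogSoftmaxWithLoss.
# A minimal runnable sketch (sizes are illustrative and unrelated to the class above):
if __name__ == "__main__":
    crit = nn.AdaptiveLogSoftmaxWithLoss(in_features=32, n_classes=1_000, cutoffs=[100, 500], div_value=2.0)
    hidden = torch.randn(36, 32)             # flattened (batch * seq_len) hidden states
    target = torch.randint(0, 1_000, (36,))
    out = crit(hidden, target)
    print(out.output.shape, float(out.loss))  # per-token target log-probs and their mean NLL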
| 307
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
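# Hedged note: the custom TableFormat above emits bare pipe-separated rows with no rule
# lines, which renders cleanly inside Slack's monospaced code blocks.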
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 307
| 1
|
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    return sieve[n]
def contains_an_even_digit(n: int) -> bool:
    return any(digit in '02468' for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
| 307
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
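# Hedged aside: a local binary pattern thresholds the 8 neighbours of a pixel against its
# centre and packs the bits into one byte. A minimal NumPy sketch, independent of the
# library functions under test (names are illustrative):
def _lbp_sketch(patch: np.ndarray) -> int:
    """Return the LBP code of the centre of a 3x3 grayscale window."""
    center = patch[1, 1]
    neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                 patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]  # clockwise from top-left
    return sum(1 << k for k, p in enumerate(neighbors) if p >= center)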
| 307
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
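# Hedged note: keys containing "*" are templates; recursively_load_weights() below fills the
# wildcard with the encoder-layer index parsed out of each fairseq tensor name.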
lowerCamelCase__ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
for attribute in key.split('.' ):
lowerCAmelCase__ : int = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if weight_type is not None:
lowerCAmelCase__ : Any = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).shape
else:
lowerCAmelCase__ : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowerCAmelCase__ : str = value
elif weight_type == "weight_v":
lowerCAmelCase__ : List[Any] = value
elif weight_type == "bias":
lowerCAmelCase__ : List[Any] = value
elif weight_type == "running_mean":
lowerCAmelCase__ : Dict = value
elif weight_type == "running_var":
lowerCAmelCase__ : List[Any] = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ : Optional[int] = value
elif weight_type == "inv_freq":
lowerCAmelCase__ : Tuple = value
else:
lowerCAmelCase__ : List[str] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : int = fairseq_model.state_dict()
lowerCAmelCase__ : Optional[int] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase__ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ : Union[str, Any] = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase__ : Tuple = True
if "*" in mapped_key:
lowerCAmelCase__ : Optional[int] = name.split(SCREAMING_SNAKE_CASE_ )[0].split('.' )[-2]
lowerCAmelCase__ : Optional[int] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE_ )
if "pos_bias_u" in name:
lowerCAmelCase__ : Dict = None
elif "pos_bias_v" in name:
lowerCAmelCase__ : int = None
elif "weight_g" in name:
lowerCAmelCase__ : Optional[int] = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ : str = 'weight_v'
elif "bias" in name:
lowerCAmelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ : int = 'weight'
elif "running_mean" in name:
lowerCAmelCase__ : List[str] = 'running_mean'
elif "inv_freq" in name:
lowerCAmelCase__ : Tuple = 'inv_freq'
elif "running_var" in name:
lowerCAmelCase__ : Optional[Any] = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase__ : int = 'num_batches_tracked'
else:
lowerCAmelCase__ : Tuple = None
set_recursively(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : List[Any] = full_name.split('conv_layers.' )[-1]
lowerCAmelCase__ : List[str] = name.split('.' )
lowerCAmelCase__ : Tuple = int(items[0] )
lowerCAmelCase__ : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowerCAmelCase__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowerCAmelCase__ : Optional[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowerCAmelCase__ : Dict = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowerCAmelCase__ : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True ) -> int:
if config_path is not None:
lowerCAmelCase__ : List[str] = WavaVecaConformerConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , hidden_act='swish' )
else:
lowerCAmelCase__ : List[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCAmelCase__ : Any = 'rotary'
if is_finetuned:
if dict_path:
lowerCAmelCase__ : str = Dictionary.load(SCREAMING_SNAKE_CASE_ )
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
lowerCAmelCase__ : Optional[Any] = target_dict.pad_index
lowerCAmelCase__ : str = target_dict.bos_index
lowerCAmelCase__ : int = target_dict.eos_index
lowerCAmelCase__ : str = len(target_dict.symbols )
lowerCAmelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE_ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Optional[int] = 1
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Dict = True if config.feat_extract_norm == 'layer' else False
lowerCAmelCase__ : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Optional[Any] = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = WavaVecaConformerForCTC(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Any = WavaVecaConformerForPreTraining(SCREAMING_SNAKE_CASE_ )
if is_finetuned:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCAmelCase__ : str = argparse.Namespace(task='audio_pretraining' )
lowerCAmelCase__ : Union[str, Any] = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , not is_finetuned )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase__ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
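# Hedged usage sketch (the script name and paths are placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --config_path ./config.json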
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : Any , a : Tuple , a : Optional[int]=7 , a : Dict=3 , a : Any=18 , a : Optional[Any]=30 , a : List[str]=400 , a : int=True , a : Optional[Any]=None , a : Tuple=True , a : int=None , a : Dict=True , a : List[str]=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , a : List[Any]=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , a : List[str]=True , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = size if size is not None else {'height': 224, 'width': 224}
lowerCAmelCase__ : Optional[int] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Optional[Any] = num_channels
lowerCAmelCase__ : List[Any] = image_size
lowerCAmelCase__ : Dict = min_resolution
lowerCAmelCase__ : Dict = max_resolution
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Tuple = size
lowerCAmelCase__ : Union[str, Any] = do_center_crop
lowerCAmelCase__ : int = crop_size
lowerCAmelCase__ : str = do_normalize
lowerCAmelCase__ : Any = image_mean
lowerCAmelCase__ : List[str] = image_std
lowerCAmelCase__ : Optional[Any] = do_convert_rgb
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def _lowerCamelCase ( self : List[Any] , a : Optional[int]=False , a : Tuple=False , a : Union[str, Any]=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCAmelCase__ : int = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCAmelCase__ : List[str] = []
for i in range(self.batch_size ):
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCAmelCase__ : int = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCAmelCase__ : List[str] = [torch.from_numpy(a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = ChineseCLIPImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ChineseCLIPImageProcessingTester(self , do_center_crop=a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
self.assertTrue(hasattr(a , 'do_convert_rgb' ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowerCAmelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : Optional[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : Dict = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = ChineseCLIPImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=a )
lowerCAmelCase__ : str = 3
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
self.assertTrue(hasattr(a , 'do_convert_rgb' ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Any = self.image_processor_tester.prepare_inputs(equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : List[str] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
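# Hedged note: this second test class feeds 4-channel (RGBA) inputs; with do_convert_rgb
# enabled the processor is expected to collapse them to RGB, which is what
# expected_encoded_image_num_channels = 3 asserts in the shape checks above.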
| 307
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
) -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
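# Quick sanity check (illustrative sketch added here, not part of the original module):
#     betas = betas_for_alpha_bar(1000)      # cosine schedule
#     assert betas.shape == (1000,)
#     assert float(betas.max()) <= 0.999     # every beta is capped at max_beta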
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler with Heun steps for discrete beta schedules (Algorithm 2 of Karras et al., 2022).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,  # sensible defaults
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        # scale the denoising model input by 1 / sqrt(sigma^2 + 1)
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
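    # Note on the Karras schedule above (added commentary): the returned sigmas follow
    #     sigma_i = (sigma_max^(1/rho) + ramp_i * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
    # with rho = 7, which clusters inference steps toward the low-noise end of the
    # schedule where fine detail is resolved.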
    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
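# Minimal usage sketch (added for illustration; `unet` and the latent shape are
# assumptions, not part of this module). Each visible timestep triggers two model
# calls in total, driven by the scheduler's internal first/second-order state:
#
#     scheduler = HeunDiscreteScheduler()
#     scheduler.set_timesteps(num_inference_steps=25)
#     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)           # hypothetical denoiser
#         sample = scheduler.step(noise_pred, t, sample).prev_sample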
| 307
| 1
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase__ = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCamelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = NERTransformer(args)
lowerCamelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCamelCase__ = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
lowerCamelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
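# Illustrative invocation (added; flags beyond those defined above come from
# `add_generic_args`/`BaseTransformer` and are assumptions here, not verified):
# python run_ner.py --data_dir ./conll2003 --labels ./conll2003/labels.txt \
#     --model_name_or_path bert-base-cased --output_dir ./ner-out \
#     --max_seq_length 128 --do_train --do_predict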
| 307
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
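# e.g. to_atuple(7) -> (7, 7), while to_atuple((16, 16)) passes through unchanged;
# the attention tests below use it to normalize `image_size`/`patch_size` config
# values that may be either an int or a pair. (Comment added for clarity.)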
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_pretrained_model_sanity(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
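# Added note: `logits_per_image` is the scaled similarity matrix between the one
# image and the two Italian captions, so the shape checks above assert it is
# (num_images, num_texts) with `logits_per_text` as its transpose.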
| 307
| 1
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 307
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
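# Shape note (added): each entry of `tensor_datasets` is a 4-tuple of tensors
# (input_ids, mc_token_ids, lm_labels, mc_labels) with shapes
# (n_batch, 2, input_len), (n_batch, 2), (n_batch, 2, input_len) and (n_batch,),
# where axis 1 holds the two candidate continuations of each story.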
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
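# A standalone sketch (not part of the script above) of the exponential moving
# average used for the displayed training loss: each new batch loss contributes
# 30%, the running average keeps 70%.
ema = None
for loss in [4.0, 2.0, 1.0]:
    ema = loss if ema is None else 0.7 * ema + 0.3 * loss
assert abs(ema - 2.68) < 1e-9  # 0.7 * (0.7 * 4.0 + 0.3 * 2.0) + 0.3 * 1.0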
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""YolosFeatureExtractor"""]
lowerCamelCase__ = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
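# Pattern note: under TYPE_CHECKING the symbols above are imported eagerly so
# static analyzers can see them; at runtime the module object is swapped for a
# _LazyModule that resolves each attribute from the import structure on first
# access, keeping the top-level package import cheap.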
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
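# Hedged usage sketch: `require_version` takes a pip-style requirement plus an
# optional hint shown on failure, e.g.
#     require_version("numpy>=1.17", "pip install numpy -U")
# The helper above (named `dep_version_check` in the unobfuscated source, as far
# as can be told) resolves the pinned requirement from `deps` before delegating.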
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class A__ ( unittest.TestCase ):
lowercase = inspect.getfile(accelerate.test_utils )
lowercase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
lowercase = ['accelerate', 'launch']
lowercase = Path.home() / '.cache/huggingface/accelerate'
lowercase = 'default_config.yaml'
lowercase = config_folder / config_file
lowercase = config_folder / '_default_config.yaml'
lowercase = Path('tests/test_configs' )
@classmethod
def _lowerCamelCase ( cls : Dict ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _lowerCamelCase ( cls : List[Any] ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=a ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(a ), self.test_file_path] , env=os.environ.copy() )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class A__ ( unittest.TestCase ):
lowercase = 'test-tpu'
lowercase = 'us-central1-a'
lowercase = 'ls'
lowercase = ['accelerate', 'tpu-config']
lowercase = 'cd /usr/share'
lowercase = 'tests/test_samples/test_command_file.sh'
lowercase = 'Running gcloud compute tpus tpu-vm ssh'
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=a )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , a , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , a , )
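# Common thread in the assertions above: `accelerate tpu-config --debug` only
# prints the gcloud command instead of executing it, chains the setup command
# and the user commands with `;`, and always targets `--worker all`.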
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
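# Note on the per-device expected values above: DPMSolverSDE injects Brownian
# noise during sampling, and random number generation differs across the
# cpu/cuda/mps backends even with a fixed noise_sampler_seed, which is
# presumably why each device gets its own reference sum and mean.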
from __future__ import annotations
from typing import Any
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> None:
create_state_space_tree(SCREAMING_SNAKE_CASE_ , [] , 0 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
if index == len(SCREAMING_SNAKE_CASE_ ):
print(SCREAMING_SNAKE_CASE_ )
return
create_state_space_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCamelCase__ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
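# Trace note: the exclusion branch recurses before the element is appended, so
# the empty subsequence prints first; for [1, 2] the printed order would be
# [], [2], [1], [1, 2].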
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowerCamelCase__ = KEYMAP["""up"""]
lowerCamelCase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCamelCase__ = ord(str(i))
def lowerCAmelCase__ ( ) -> Dict:
if os.name == "nt":
import msvcrt
lowerCAmelCase__ : Dict = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE_ ) == 0:
# Read the keystroke
lowerCAmelCase__ : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ : Dict = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ : Dict = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ )
if ord(SCREAMING_SNAKE_CASE_ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase__ : Dict = chr(KEYMAP['esc'] )
except KeyError:
lowerCAmelCase__ : Dict = cha[1]
else:
lowerCAmelCase__ : List[Any] = ch.decode(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase__ : Tuple = sys.stdin.fileno()
lowerCAmelCase__ : Any = termios.tcgetattr(SCREAMING_SNAKE_CASE_ )
try:
tty.setraw(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ )
return ch
def lowerCAmelCase__ ( ) -> Union[str, Any]:
lowerCAmelCase__ : Any = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["esc"]:
lowerCAmelCase__ : Union[str, Any] = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["mod_int"]:
lowerCAmelCase__ : str = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE_ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
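# Encoding sketch: ANSI arrow keys reuse the ASCII codes of the letters A-D
# (65-68), so the keymap shifts them past the 8-bit range with ARROW_KEY_FLAG
# to keep them from colliding with printable characters.
assert 65 + (1 << 8) == 321  # "up" is reported as chr(321), outside ASCII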
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase__ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
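# Round-trip sketch, assuming the two defs above are exposed as base16_encode /
# base16_decode (matching the module-level naming convention used elsewhere in
# this corpus); both lines below would then hold:
# base16_encode(b"Hello") == "48656C6C6F"
# base16_decode("48656C6C6F") == b"Hello"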
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = 1
@register_to_config
def __init__( self : List[str] , a : int=2_000 , a : int=0.1 , a : Tuple=20 , a : Optional[int]=1E-3 ):
'''simple docstring'''
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : int = None
def _lowerCamelCase ( self : List[Any] , a : str , a : Union[str, torch.device] = None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.linspace(1 , self.config.sampling_eps , a , device=a )
def _lowerCamelCase ( self : Dict , a : Optional[Any] , a : Any , a : int , a : List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
lowerCAmelCase__ : str = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
lowerCAmelCase__ : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
lowerCAmelCase__ : List[Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
lowerCAmelCase__ : Optional[int] = std.unsqueeze(-1 )
lowerCAmelCase__ : Optional[Any] = -score / std
# compute
lowerCAmelCase__ : Optional[int] = -1.0 / len(self.timesteps )
lowerCAmelCase__ : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
lowerCAmelCase__ : Optional[int] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
lowerCAmelCase__ : Tuple = beta_t.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = -0.5 * beta_t * x
lowerCAmelCase__ : Dict = torch.sqrt(a )
lowerCAmelCase__ : str = drift - diffusion**2 * score
lowerCAmelCase__ : Tuple = x + drift * dt
# add noise
lowerCAmelCase__ : Optional[Any] = randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype )
lowerCAmelCase__ : Union[str, Any] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
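# Derivation note for the step method above: it performs one Euler-Maruyama
# update of the reverse-time VP-SDE,
#     dx = [-0.5 * beta(t) * x - beta(t) * score] dt + sqrt(beta(t)) dW,
# integrated backwards with dt = -1 / len(timesteps), which is why the noise
# term is scaled by sqrt(-dt).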
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[list[int]]:
lowerCAmelCase__ : list[list[int]] = []
create_all_state(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , [] , SCREAMING_SNAKE_CASE_ )
return result
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE_ , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE_ )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE_ , level - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
current_list.pop()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> None:
for i in total_list:
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
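# With n = 4 and k = 2 the backtracking above produces, in order:
# [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]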
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'scipy']
def __init__( self : str , *a : Union[str, Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
            # the ad-hoc attribute set above survives the save/load round-trip
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
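# Both hub tests above follow the same round-trip: push the config, re-download
# it, and compare every serialized field except `transformers_version`, which
# records the library version at save time and legitimately differs.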
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowerCamelCase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowerCamelCase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowerCamelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict[str, int]:
lowerCAmelCase__ : Optional[Any] = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return x[0]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : int = get_letter_count(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = ''.join(freq_to_letter[freq] )
lowerCAmelCase__ : List[str] = list(freq_to_letter_str.items() )
freq_pairs.sort(key=SCREAMING_SNAKE_CASE_ , reverse=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Optional[int] = get_frequency_order(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
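# Scoring note (the defs above are anonymized; names here are descriptive): the
# match score ranges from 0 to 12 -- one point for each of ETAOIN's six most
# and six least common letters found at the matching end of the message's
# frequency order -- so ordinary English text scores high while substitution
# ciphertext scores low.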
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class A__ :
lowercase = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing image embeddings manually gives an identical result
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class A__ ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : int = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
lowerCAmelCase__ : int = load_dataset('ashraq/esc50' )
lowerCAmelCase__ : List[str] = dataset['train']['audio'][-1]['array']
lowerCAmelCase__ : Any = audio_classifier(a , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(a ) , [{'score': 0.5_0_1, 'label': 'Sound of a dog'}, {'score': 0.4_9_9, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@slow
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio of a dog
lowerCAmelCase__ : Union[str, Any] = load_dataset('ashraq/esc50' )
lowerCAmelCase__ : int = dataset['train']['audio'][-1]['array']
lowerCAmelCase__ : List[str] = audio_classifier(a , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(a ) , [
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
] , )
lowerCAmelCase__ : Tuple = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(a ) , [
[
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
lowerCAmelCase__ : Dict = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        lowerCAmelCase__ : Union[str, Any] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
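# Stooge sort runs in O(n^(log 3 / log 1.5)) ~= O(n^2.71) time, slower than
# bubble sort; e.g. stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].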
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __magic_name__ ):
lowercase = (DDPMScheduler,)
def _lowerCamelCase ( self : Dict , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**a )
lowerCAmelCase__ : Optional[int] = len(a )
lowerCAmelCase__ : List[str] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase__ : int = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : List[str] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : Optional[Any] = scheduler.step(a , a , a , generator=a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase__ : Optional[int] = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : str = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
lowerCAmelCase__ : List[Any] = len(a )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter
lowerCAmelCase__ : Any = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : int = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a , generator=a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase__ : int = pred_prev_sample
lowerCAmelCase__ : Optional[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**a )
lowerCAmelCase__ : List[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a )
lowerCAmelCase__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(a ):
if i == len(a ) - 1:
lowerCAmelCase__ : Dict = -1
else:
lowerCAmelCase__ : Any = timesteps[i + 1]
lowerCAmelCase__ : List[Any] = scheduler.previous_timestep(a )
lowerCAmelCase__ : Tuple = prev_t.item()
self.assertEqual(a , a )
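# Illustration of the check above: for custom timesteps [100, 87, 50, 1, 0],
# previous_timestep should map 100 -> 87, 87 -> 50, 50 -> 1, 1 -> 0, and the
# final step 0 -> -1 (the sentinel used when no previous timestep exists).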
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Union[str, Any] = scheduler_class(**a )
lowerCAmelCase__ : int = [100, 87, 50, 51, 0]
with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : str = scheduler_class(**a )
lowerCAmelCase__ : Dict = [100, 87, 50, 1, 0]
lowerCAmelCase__ : List[str] = len(a )
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=a )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
from random import choice
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
return choice(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : List[Any] = random_pivot(SCREAMING_SNAKE_CASE_ )
# partition based on pivot
# linear time
lowerCAmelCase__ : List[Any] = [e for e in lst if e < pivot]
lowerCAmelCase__ : Union[str, Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(SCREAMING_SNAKE_CASE_ ) == k - 1:
return pivot
# the kth smallest element is among the elements bigger than the pivot
elif len(SCREAMING_SNAKE_CASE_ ) < k - 1:
return kth_number(SCREAMING_SNAKE_CASE_ , k - len(SCREAMING_SNAKE_CASE_ ) - 1 )
# the kth smallest element is among the elements smaller than the pivot
else:
return kth_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
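# A minimal usage sketch (illustrative values; assumes distinct elements,
# since duplicates equal to the pivot are dropped by the partition above):
#   kth_number([2, 7, 1, 9, 5], 1) == 1   # smallest
#   kth_number([2, 7, 1, 9, 5], 3) == 5   # median
#   kth_number([2, 7, 1, 9, 5], 5) == 9   # largest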
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A__ ( __magic_name__ ):
lowercase = 42
lowercase = jnp.floataa
lowercase = True
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().setup()
lowerCAmelCase__ : int = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Dict , *a : Optional[int] , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = super().__call__(*a , **a )
lowerCAmelCase__ : Optional[Any] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class A__ ( __magic_name__ ):
lowercase = FlaxBigBirdForNaturalQuestionsModule
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
def cross_entropy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
lowerCAmelCase__ : int = logits.shape[-1]
lowerCAmelCase__ : Dict = (labels[..., None] == jnp.arange(SCREAMING_SNAKE_CASE_ )[None]).astype('f4' )
lowerCAmelCase__ : Any = jax.nn.log_softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
lowerCAmelCase__ : str = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowerCAmelCase__ : Any = reduction(SCREAMING_SNAKE_CASE_ )
return loss
lowerCAmelCase__ : Optional[int] = partial(SCREAMING_SNAKE_CASE_ , reduction=jnp.mean )
lowerCAmelCase__ : Any = cross_entropy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = cross_entropy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = cross_entropy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return (start_loss + end_loss + pooled_loss) / 3
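# Hedged sketch of the loss above (shapes are assumptions): for logits of
# shape (batch, num_classes) and integer labels of shape (batch,), the
# one-hot cross entropy reduces to
#   loss_i = -log_softmax(logits_i)[labels_i]
# and the returned value averages the start-span, end-span and pooled
# (category) terms with equal weight 1/3.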
@dataclass
class A__ :
lowercase = "google/bigbird-roberta-base"
lowercase = 3000
lowercase = 10500
lowercase = 128
lowercase = 3
lowercase = 1
lowercase = 5
# tx_args
lowercase = 3E-5
lowercase = 0.0
lowercase = 20000
lowercase = 0.00_95
lowercase = "bigbird-roberta-natural-questions"
lowercase = "training-expt"
lowercase = "data/nq-training.jsonl"
lowercase = "data/nq-validation.jsonl"
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=a )
lowerCAmelCase__ : List[str] = os.path.join(self.base_dir , self.save_dir )
lowerCAmelCase__ : str = self.batch_size_per_device * jax.device_count()
@dataclass
class A__ :
lowercase = 42
lowercase = 4096 # no dynamic padding on TPUs
def __call__( self : List[str] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.collate_fn(a )
lowerCAmelCase__ : List[Any] = jax.tree_util.tree_map(a , a )
return batch
def _lowerCamelCase ( self : int , a : Any ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : str = self.fetch_inputs(features['input_ids'] )
lowerCAmelCase__ : Optional[Any] = {
'input_ids': jnp.array(a , dtype=jnp.intaa ),
'attention_mask': jnp.array(a , dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa ),
}
return batch
def _lowerCamelCase ( self : Optional[Any] , a : list ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [self._fetch_inputs(a ) for ids in input_ids]
return zip(*a )
def _lowerCamelCase ( self : List[Any] , a : list ):
'''simple docstring'''
lowerCAmelCase__ : int = [1 for _ in range(len(a ) )]
while len(a ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
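# Usage sketch for the padding helper above (values are illustrative): with
# max_length=8 and pad_id=0, input_ids [5, 6, 7] becomes
# [5, 6, 7, 0, 0, 0, 0, 0] with attention mask [1, 1, 1, 0, 0, 0, 0, 0], so
# padded positions are masked out of attention.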
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Dict:
if seed is not None:
lowerCAmelCase__ : List[Any] = dataset.shuffle(seed=SCREAMING_SNAKE_CASE_ )
for i in range(len(SCREAMING_SNAKE_CASE_ ) // batch_size ):
lowerCAmelCase__ : Optional[int] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(SCREAMING_SNAKE_CASE_ )
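# Note on the generator above: it yields len(dataset) // batch_size full
# batches and silently drops the trailing remainder, which keeps every batch
# the same shape (convenient under pmap's static-shape requirement).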
@partial(jax.pmap , axis_name='batch' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
def loss_fn(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : str = model_inputs.pop('start_labels' )
lowerCAmelCase__ : Union[str, Any] = model_inputs.pop('end_labels' )
lowerCAmelCase__ : Tuple = model_inputs.pop('pooled_labels' )
lowerCAmelCase__ : Union[str, Any] = state.apply_fn(**SCREAMING_SNAKE_CASE_ , params=SCREAMING_SNAKE_CASE_ , dropout_rng=SCREAMING_SNAKE_CASE_ , train=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = outputs
return state.loss_fn(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ , lowerCAmelCase__ : int = jax.random.split(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = jax.value_and_grad(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = grad_fn(state.params )
lowerCAmelCase__ : Any = jax.lax.pmean({'loss': loss} , axis_name='batch' )
lowerCAmelCase__ : int = jax.lax.pmean(SCREAMING_SNAKE_CASE_ , 'batch' )
lowerCAmelCase__ : Dict = state.apply_gradients(grads=SCREAMING_SNAKE_CASE_ )
return state, metrics, new_drp_rng
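# Note on the pmapped step above: the loss and the gradients are averaged
# across devices with jax.lax.pmean over the 'batch' axis before
# apply_gradients, so every replica applies identical updates and the
# replicated parameters stay in sync.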
@partial(jax.pmap , axis_name='batch' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowerCAmelCase__ : Tuple = model_inputs.pop('start_labels' )
lowerCAmelCase__ : int = model_inputs.pop('end_labels' )
lowerCAmelCase__ : Union[str, Any] = model_inputs.pop('pooled_labels' )
lowerCAmelCase__ : List[str] = state.apply_fn(**SCREAMING_SNAKE_CASE_ , params=state.params , train=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = outputs
lowerCAmelCase__ : Optional[Any] = state.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class A__ ( train_state.TrainState ):
lowercase = struct.field(pytree_node=__magic_name__ )
@dataclass
class A__ :
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = None
def _lowerCamelCase ( self : List[Any] , a : Optional[int] , a : List[Any] , a : Union[str, Any] , a : Dict=None ):
'''simple docstring'''
lowerCAmelCase__ : str = model.params
lowerCAmelCase__ : Dict = TrainState.create(
apply_fn=model.__call__ , params=a , tx=a , loss_fn=a , )
if ckpt_dir is not None:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = restore_checkpoint(a , a )
lowerCAmelCase__ : Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = build_tx(**a )
lowerCAmelCase__ : Optional[Any] = train_state.TrainState(
step=a , apply_fn=model.__call__ , params=a , tx=a , opt_state=a , )
lowerCAmelCase__ : int = args
lowerCAmelCase__ : Optional[int] = data_collator
lowerCAmelCase__ : int = lr
lowerCAmelCase__ : str = params
lowerCAmelCase__ : List[Any] = jax_utils.replicate(a )
return state
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : Optional[int] , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.args
lowerCAmelCase__ : List[str] = len(a ) // args.batch_size
lowerCAmelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase__ : Tuple = jax.random.split(a , jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCAmelCase__ : Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
lowerCAmelCase__ : Optional[int] = get_batched_dataset(a , args.batch_size , seed=a )
lowerCAmelCase__ : int = 0
for batch in tqdm(a , total=a , desc=f'''Running EPOCH-{epoch}''' ):
lowerCAmelCase__ : Optional[int] = self.data_collator(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.train_step_fn(a , a , **a )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
lowerCAmelCase__ : Union[str, Any] = jax_utils.unreplicate(state.step )
lowerCAmelCase__ : Dict = running_loss.item() / i
lowerCAmelCase__ : List[Any] = self.scheduler_fn(state_step - 1 )
lowerCAmelCase__ : List[str] = self.evaluate(a , a )
lowerCAmelCase__ : int = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(a ) )
self.logger.log(a , commit=a )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=a )
def _lowerCamelCase ( self : Optional[int] , a : Optional[int] , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = get_batched_dataset(a , self.args.batch_size )
lowerCAmelCase__ : str = len(a ) // self.args.batch_size
lowerCAmelCase__ : Any = jnp.array(0 , dtype=jnp.floataa )
lowerCAmelCase__ : Any = 0
for batch in tqdm(a , total=a , desc='Evaluating ... ' ):
lowerCAmelCase__ : Union[str, Any] = self.data_collator(a )
lowerCAmelCase__ : str = self.val_step_fn(a , **a )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def _lowerCamelCase ( self : Tuple , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = jax_utils.unreplicate(a )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=' ... ' )
self.model_save_fn(a , params=state.params )
with open(os.path.join(a , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(a , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(a , 'data_collator.joblib' ) )
with open(os.path.join(a , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , a )
print('DONE' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'flax_model.msgpack' ) , 'rb' ) as f:
lowerCAmelCase__ : List[str] = from_bytes(state.params , f.read() )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'opt_state.msgpack' ) , 'rb' ) as f:
lowerCAmelCase__ : str = from_bytes(state.opt_state , f.read() )
lowerCAmelCase__ : int = joblib.load(os.path.join(SCREAMING_SNAKE_CASE_ , 'args.joblib' ) )
lowerCAmelCase__ : Dict = joblib.load(os.path.join(SCREAMING_SNAKE_CASE_ , 'data_collator.joblib' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'training_state.json' ) , 'r' ) as f:
lowerCAmelCase__ : Optional[int] = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = num_train_steps - warmup_steps
lowerCAmelCase__ : Dict = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE_ , end_value=SCREAMING_SNAKE_CASE_ , transition_steps=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE_ , end_value=1e-7 , transition_steps=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
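# Hedged illustration of the joined schedule above (values are assumptions):
# with init_lr=0.0, lr=3e-5, warmup_steps=2_000 and num_train_steps=10_000,
#   steps 0..2_000:       linear warmup 0.0  -> 3e-5
#   steps 2_000..10_000:  linear decay  3e-5 -> 1e-7
# optax.join_schedules switches from the warmup to the decay schedule at the
# single boundary `warmup_steps`.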
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
def weight_decay_mask(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : int = traverse_util.flatten_dict(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k in params}
return traverse_util.unflatten_dict(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = scheduler_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = optax.adamw(learning_rate=SCREAMING_SNAKE_CASE_ , weight_decay=SCREAMING_SNAKE_CASE_ , mask=SCREAMING_SNAKE_CASE_ )
return tx, lr
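# The weight-decay mask above follows the common convention of exempting
# biases and LayerNorm scale parameters from decay: an entry is True (decay
# applies) only when the flattened parameter path ends neither in 'bias' nor
# in ('LayerNorm', 'scale').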
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase__ = concatenate_datasets
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadManager
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase__ = trt.Logger(trt.Logger.WARNING)
lowerCamelCase__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
lowerCamelCase__ = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
lowerCamelCase__ = args.per_device_eval_batch_size
lowerCamelCase__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase__ = True
lowerCamelCase__ = """temp_engine/bert-fp32.engine"""
if args.fp16:
lowerCamelCase__ = """temp_engine/bert-fp16.engine"""
if args.int8:
lowerCamelCase__ = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
lowerCamelCase__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase__ = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
config.set_flag(trt.BuilderFlag.INT8)
lowerCamelCase__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : List[Any] = np.asarray(inputs['input_ids'] , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
lowerCAmelCase__ : Any = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE_ )
# start time
lowerCAmelCase__ : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(SCREAMING_SNAKE_CASE_ ) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE_ ), int(SCREAMING_SNAKE_CASE_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase__ : List[Any] = time.time()
lowerCAmelCase__ : str = end_time - start_time
lowerCAmelCase__ : Dict = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
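# Flow of model_infer above: async host->device copies for the three input
# buffers, an async execute on the CUDA stream, then device->host copies of
# the start/end logit buffers. The wall-clock window opens after the input
# copies are enqueued and closes after stream.synchronize(), so the reported
# infer_time includes kernel execution plus the output copies.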
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase__ = raw_datasets["""validation"""].column_names
lowerCamelCase__ = """question""" if """question""" in column_names else column_names[0]
lowerCamelCase__ = """context""" if """context""" in column_names else column_names[1]
lowerCamelCase__ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase__ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
lowerCamelCase__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
lowerCAmelCase__ : Tuple = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase__ : str = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=SCREAMING_SNAKE_CASE_ , stride=args.doc_stride , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase__ : str = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase__ : Union[str, Any] = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase__ : int = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase__ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase__ : List[str] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
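# Hedged illustration of the overflow handling above: with
# return_overflowing_tokens=True and stride=args.doc_stride, a long context is
# split into overlapping windows that share doc_stride context tokens, and
# overflow_to_sample_mapping records which original example each window came
# from so predictions can be regrouped later.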
lowerCamelCase__ = raw_datasets["""validation"""]
# Validation Feature Creation
lowerCamelCase__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
lowerCamelCase__ = default_data_collator
lowerCamelCase__ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
lowerCamelCase__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="eval" ) -> List[str]:
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCAmelCase__ : Tuple = postprocess_qa_predictions(
examples=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , predictions=SCREAMING_SNAKE_CASE_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase__ : Tuple = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase__ : Optional[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase__ : List[str] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=SCREAMING_SNAKE_CASE_ , label_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE_ ) ) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE_ ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowerCamelCase__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowerCamelCase__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowerCamelCase__ = cuda.mem_alloc(h_outputa.nbytes)
lowerCamelCase__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase__ = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
lowerCamelCase__ = 0.0
lowerCamelCase__ = 0
lowerCamelCase__ = timeit.default_timer()
lowerCamelCase__ = None
for step, batch in enumerate(eval_dataloader):
lowerCamelCase__ , lowerCamelCase__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowerCamelCase__ , lowerCamelCase__ = outputs
lowerCamelCase__ = torch.tensor(start_logits)
lowerCamelCase__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
lowerCamelCase__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
lowerCamelCase__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
lowerCamelCase__ = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase__ = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
lowerCamelCase__ = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , **a : Any ):
'''simple docstring'''
super().__init__(**a )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[Any] , a : Union[str, List[str], "Image", List["Image"]] , **a : Optional[int] ):
'''simple docstring'''
return super().__call__(a , **a )
def _lowerCamelCase ( self : Union[str, Any] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ : Dict = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
lowerCAmelCase__ : Optional[Any] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def _lowerCamelCase ( self : Dict , a : Union[str, Any] , a : Any=None , a : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
lowerCAmelCase__ : Any = load_image(a )
lowerCAmelCase__ : Dict = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase__ : Optional[Any] = candidate_labels
lowerCAmelCase__ : int = [hypothesis_template.format(a ) for x in candidate_labels]
lowerCAmelCase__ : List[str] = self.tokenizer(a , return_tensors=self.framework , padding=a )
lowerCAmelCase__ : Tuple = [text_inputs]
return inputs
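# Example of the templating above (labels are illustrative): with
# candidate_labels=['cat', 'dog'] and the default template, the tokenizer
# receives ["This is a photo of cat.", "This is a photo of dog."].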
def _lowerCamelCase ( self : Tuple , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = model_inputs.pop('candidate_labels' )
lowerCAmelCase__ : List[Any] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , a ):
lowerCAmelCase__ : Tuple = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ : int = text_inputs[0][0]
lowerCAmelCase__ : Any = self.model(**a , **a )
lowerCAmelCase__ : int = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def _lowerCamelCase ( self : int , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = model_outputs.pop('candidate_labels' )
lowerCAmelCase__ : List[Any] = model_outputs['logits'][0]
if self.framework == "pt":
lowerCAmelCase__ : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase__ : Tuple = probs.tolist()
if not isinstance(a , a ):
lowerCAmelCase__ : int = [scores]
elif self.framework == "tf":
lowerCAmelCase__ : Tuple = stable_softmax(a , axis=-1 )
lowerCAmelCase__ : List[Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase__ : Any = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(a , a ) , key=lambda a : -a[0] )
]
return result
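# The pipeline therefore returns one dict per candidate label, sorted by
# descending score, e.g. (numbers are illustrative):
#   [{'score': 0.98, 'label': 'cat'}, {'score': 0.02, 'label': 'dog'}]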
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 307
| 1
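# The classes above are auto-generated import placeholders: when an optional
# backend is missing, users still get a class object, but any non-private
# attribute access (e.g. `from_pretrained`) raises a helpful ImportError. A rough
# standalone sketch of the mechanism -- the real `DummyObject` and
# `requires_backends` live in `transformers.utils`; the names below are
# illustrative, not the library's implementation:
import importlib.util

def requires_backends_sketch(cls, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{cls.__name__} requires the missing backends: {missing}")

class DummyObjectSketch(type):
    # Metaclass: intercept attribute lookups on the placeholder class itself.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends_sketch(cls, cls._backends)

class OnnxPipelinePlaceholder(metaclass=DummyObjectSketch):
    _backends = ["torch", "transformers", "onnx"]

# OnnxPipelinePlaceholder.from_pretrained -> ImportError if any backend is absent.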
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class A__ ( __magic_name__ ):
lowercase = 'funnel'
lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self : Optional[Any] , a : Optional[int]=30_522 , a : Any=[4, 4, 4] , a : int=None , a : Optional[int]=2 , a : int=768 , a : Dict=12 , a : Dict=64 , a : Union[str, Any]=3_072 , a : Any="gelu_new" , a : List[str]=0.1 , a : Optional[int]=0.1 , a : Optional[int]=0.0 , a : Optional[int]=0.1 , a : Optional[int]=None , a : Union[str, Any]=1E-9 , a : Dict="mean" , a : Tuple="relative_shift" , a : Any=True , a : List[str]=True , a : Tuple=True , **a : Optional[Any] , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Optional[int] = block_sizes
lowerCAmelCase__ : str = [1] * len(a ) if block_repeats is None else block_repeats
assert len(a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
lowerCAmelCase__ : Union[str, Any] = num_decoder_layers
lowerCAmelCase__ : Dict = d_model
lowerCAmelCase__ : int = n_head
lowerCAmelCase__ : Tuple = d_head
lowerCAmelCase__ : Union[str, Any] = d_inner
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Dict = hidden_dropout
lowerCAmelCase__ : Dict = attention_dropout
lowerCAmelCase__ : Union[str, Any] = activation_dropout
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Optional[Any] = initializer_std
lowerCAmelCase__ : Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
lowerCAmelCase__ : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
lowerCAmelCase__ : Dict = attention_type
lowerCAmelCase__ : Any = separate_cls
lowerCAmelCase__ : Union[str, Any] = truncate_seq
lowerCAmelCase__ : List[str] = pool_q_only
super().__init__(**a )
@property
def num_hidden_layers ( self : Optional[int] ):
'''simple docstring'''
return sum(self.block_sizes )
@num_hidden_layers.setter
def num_hidden_layers ( self : str , a : Dict ):
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def num_blocks ( self : List[Any] ):
'''simple docstring'''
return len(self.block_sizes )
@num_blocks.setter
def num_blocks ( self : Dict , a : Tuple ):
'''simple docstring'''
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
| 307
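# A short usage sketch for the config above: `num_hidden_layers` is a derived,
# read-only property (the sum of `block_sizes`), which is why its setter
# deliberately raises. Values are illustrative.
from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 2, 2])
print(config.num_hidden_layers)  # 12 == sum(block_sizes)
print(config.num_blocks)         # 3 == len(block_sizes)
# config.num_hidden_layers = 6   # would raise NotImplementedError; set block_sizes instead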
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
| 307
| 1
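# The `create_and_check_decoder_model_past_large_inputs` test above checks the key
# KV-cache invariant: a forward pass that reuses `past_key_values` must produce the
# same hidden states as one full pass over the concatenated sequence. A condensed
# sketch of that invariant with a tiny randomly initialized model (sizes are
# illustrative, not the checkpoints used in the integration tests):
import torch
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(vocab_size=128, hidden_size=32, num_hidden_layers=2,
                     num_attention_heads=4, intermediate_size=64)
model = LlamaModel(config).eval()
input_ids = torch.randint(0, 128, (1, 8))
next_ids = torch.randint(0, 128, (1, 3))

with torch.no_grad():
    full = model(torch.cat([input_ids, next_ids], dim=-1)).last_hidden_state
    past = model(input_ids, use_cache=True).past_key_values
    cached = model(next_ids, past_key_values=past).last_hidden_state

assert torch.allclose(full[:, -3:], cached, atol=1e-3)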
|
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
lowercase = 42
lowercase = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
lowercase = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
lowercase = {**hyperparameters, 'max_steps': 1000}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return f'''{self.framework}-transformers-test'''
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def lowerCAmelCase__ ( request ) -> Optional[int]:
lowerCAmelCase__ : Any = SageMakerTestEnvironment(framework=request.cls.framework )
| 307
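# The `metric_definitions` property above tells SageMaker how to scrape metrics
# out of the training log with regexes. A quick sanity check of one PyTorch-side
# pattern against a fabricated log line (illustrative only):
import re

pattern = r"eval_accuracy.*=\D*(.*?)$"
line = "eval_accuracy = 0.8432"
match = re.search(pattern, line)
assert match is not None and match.group(1) == "0.8432"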
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 307
| 1
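# The `inputs_to_logits_ratio`-style property above reduces the conv strides with
# `operator.mul`: the feature extractor downsamples the raw waveform by the
# product of all strides, so with the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)`
# one logit frame covers 320 input samples. The same computation, stated directly:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 5 * 2**6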
|
import math
def check_partition_perfect ( positive_integer ) -> bool:
exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def solution ( max_proportion = 1 / 12_345 ) -> int:
total_partitions = 0
perfect_partitions = 0
integer = 3
while True:
partition_candidate = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(partition_candidate ):
partition_candidate = int(partition_candidate )
total_partitions += 1
if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 307
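# The snippet above is Project Euler problem 207: a partition value k is "perfect"
# exactly when k = 4**m - 2**m for an integer m, and `check_partition_perfect`
# tests this by inverting the formula (sqrt(4k + 1)/2 + 1/2 == 2**m). A quick
# standalone verification of that equivalence (hypothetical helper name):
import math

def is_perfect_partition(k: int) -> bool:
    m = math.log2(math.sqrt(4 * k + 1) / 2 + 1 / 2)
    return m == int(m)

expected = [4**m - 2**m for m in range(1, 4)]                # [2, 12, 56]
found = [k for k in range(1, 60) if is_perfect_partition(k)]
assert found == expected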
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
| 307
| 1
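# The class above is Transformer-XL's projected adaptive log-softmax: frequent
# "head" tokens get full-resolution logits, while rare tokens are scored through
# smaller per-cluster projections, cutting compute on large vocabularies. PyTorch
# ships a built-in variant of the same idea, shown here as a compact reference
# point (sizes are illustrative):
import torch
from torch import nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=4.0
)
hidden = torch.randn(8, 64)               # a batch of hidden states
targets = torch.randint(0, 1000, (8,))
output, loss = adaptive(hidden, targets)  # per-sample target log-probs and mean loss
log_probs = adaptive.log_prob(hidden)     # full (8, 1000) log-distribution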
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(uint8 )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
| 307
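# The local-binary-pattern test above thresholds each pixel's 8 neighbours against
# the centre value and packs the results into a bit pattern. A tiny self-contained
# version of that per-pixel computation on a toy 3x3 patch (`lbp_value` is a
# hypothetical helper mirroring the idea, not the repo's exact implementation):
import numpy as np

def lbp_value(patch: np.ndarray) -> int:
    center = patch[1, 1]
    # 8 neighbours, clockwise from the top-left corner
    neighbours = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                  patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
    return sum(int(n >= center) << i for i, n in enumerate(neighbours))

patch = np.array([[10, 20, 30], [5, 15, 25], [0, 40, 50]])
print(lbp_value(patch))  # an 8-bit code in [0, 255]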
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 307
| 1
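# The custom `TableFormat` above renders tables as bare pipe-delimited rows with no
# rule lines, which displays cleanly inside Slack code blocks. A quick demonstration
# of that format object on fabricated rows (the real script fills them from pytest
# logs):
from tabulate import DataRow, TableFormat, tabulate

hf_table_format = TableFormat(
    lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1, with_header_hide=None,
)
rows = [["tests/test_big_modeling.py", 2], ["tests/test_hooks.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"],
               tablefmt=hf_table_format, stralign="right"))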
|
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = Wav2Vec2PhonemeCTCTokenizer
lowercase = False
def _lowerCamelCase ( self : str ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : str = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
lowerCAmelCase__ : Dict = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : Optional[int] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
def _lowerCamelCase ( self : Tuple , a : str , a : Any=False , a : int=20 , a : str=5 ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=a )) for i in range(len(a ) )]
lowerCAmelCase__ : Dict = list(filter(lambda a : [t[0]] == tokenizer.encode(t[1] , do_phonemize=a ) , a ) )
if max_length is not None and len(a ) > max_length:
lowerCAmelCase__ : List[Any] = toks[:max_length]
if min_length is not None and len(a ) < min_length and len(a ) > 0:
while len(a ) < min_length:
lowerCAmelCase__ : Any = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ : str = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ : List[str] = tokenizer.decode(a , clean_up_tokenization_spaces=a )
if " " not in output_txt and len(a ) > 1:
lowerCAmelCase__ : str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a )
)
if with_prefix_space:
lowerCAmelCase__ : int = ' ' + output_txt
lowerCAmelCase__ : List[Any] = tokenizer.encode(a , add_special_tokens=a )
return output_txt, output_ids
def _lowerCamelCase ( self : Optional[Any] , **a : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
lowerCAmelCase__ : Tuple = tokenizer('m xxx ɪ' , do_phonemize=a ).input_ids
self.assertEqual(a , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
lowerCAmelCase__ : Optional[int] = tokenizer('m aaa ɪ ccc' , do_phonemize=a ).input_ids
self.assertEqual(a , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase__ : Dict = tokenizer('maɪ c' , do_phonemize=a ).input_ids
self.assertEqual(a , [3, 200] ) # mai should be <unk> (=3)
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : int = 'Hello how are you'
lowerCAmelCase__ : Any = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : Optional[Any] = 'Hello how are you'
lowerCAmelCase__ : str = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(a ).input_ids , tokenizer(a , do_phonemize=a ).input_ids )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : str = 'Hello how are you'
lowerCAmelCase__ : Optional[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
lowerCAmelCase__ : int = tokenizer.decode(tokenizer(a ).input_ids )
self.assertEqual(a , a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase__ : Tuple = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ : List[str] = tokenizer.batch_decode(a )
self.assertEqual(a , batch_tokens[0] )
self.assertEqual(a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : Optional[int] = 'Hello how are you'
lowerCAmelCase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(a , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : List[str] = 'Hello how are you'
lowerCAmelCase__ : List[str] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(a ).input_ids , tokenizer(a , do_phonemize=a ).input_ids )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
lowerCAmelCase__ : int = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase__ : List[str] = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ : Optional[Any] = tokenizer.batch_decode(a )
self.assertEqual(a , batch_tokens[0] )
self.assertEqual(a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
lowerCAmelCase__ : str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a )
lowerCAmelCase__ : Any = tokenizer.batch_decode(a , filter_word_delimiter_token=a )
self.assertEqual(a , batch_tokens[0] )
self.assertEqual(a , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : Optional[Any] = 'Hello how are you'
lowerCAmelCase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
lowerCAmelCase__ : int = tokenizer.decode(tokenizer(a ).input_ids , filter_word_delimiter_token=a )
self.assertEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : Tuple = 'Hello how are you'
lowerCAmelCase__ : Any = tokenizer.phonemize(a , phonemizer_lang='en-us' )
lowerCAmelCase__ : Optional[Any] = tokenizer.decode(tokenizer(a ).input_ids , filter_word_delimiter_token=a )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=a )
lowerCAmelCase__ : int = 'Hello how are you'
lowerCAmelCase__ : Union[str, Any] = tokenizer(a , phonemizer_lang='en-us' ).input_ids
lowerCAmelCase__ : List[str] = tokenizer(a , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(a , a )
lowerCAmelCase__ : Any = tokenizer.decode(a )
lowerCAmelCase__ : int = tokenizer.decode(a )
self.assertEqual(a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(a , 'ɛ l o h aʊ a ʁ j u' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : Optional[int] = 'Hello how Are you'
lowerCAmelCase__ : str = 'hello how are you'
lowerCAmelCase__ : Optional[int] = tokenizer(a ).input_ids
lowerCAmelCase__ : int = tokenizer(a ).input_ids
self.assertEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
lowerCAmelCase__ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(a )
self.assertEqual(a , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def _lowerCamelCase ( a : Optional[Any] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = [d[key] for d in offsets]
return retrieved_list
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowerCAmelCase__ : List[str] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCAmelCase__ : Dict = tokenizer.decode(a , output_char_offsets=a , filter_word_delimiter_token=a )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(a , a ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(a : Optional[Any] , a : str ):
self.assertTrue(isinstance(a , a ) )
self.assertTrue(isinstance(outputs_list[0] , a ) )
# transform list to ModelOutput
lowerCAmelCase__ : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(a : List[Any] , a : Dict ):
if isinstance(a , a ):
[recursive_check(la , lb ) for la, lb in zip(a , a )]
self.assertEqual(a , a )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
lowerCAmelCase__ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCAmelCase__ : int = tokenizer.batch_decode(a , output_char_offsets=a )
lowerCAmelCase__ : Optional[Any] = [tokenizer.decode(a , output_char_offsets=a ) for ids in sample_ids]
check_list_tuples_equal(a , a )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.get_tokenizers(do_lower_case=a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ : Union[str, Any] = tokenizer.vocab_size
lowerCAmelCase__ : str = len(a )
self.assertNotEqual(a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase__ : Dict = ['aaaaa bbbbbb', 'cccccccccdddddddd']
lowerCAmelCase__ : Dict = tokenizer.add_tokens(a )
lowerCAmelCase__ : Tuple = tokenizer.vocab_size
lowerCAmelCase__ : int = len(a )
self.assertNotEqual(a , 0 )
self.assertEqual(a , a )
self.assertEqual(a , len(a ) )
self.assertEqual(a , all_size + len(a ) )
lowerCAmelCase__ : str = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=a )
self.assertGreaterEqual(len(a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase__ : Optional[Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
lowerCAmelCase__ : str = tokenizer.add_special_tokens(a )
lowerCAmelCase__ : Optional[Any] = tokenizer.vocab_size
lowerCAmelCase__ : Dict = len(a )
self.assertNotEqual(a , 0 )
self.assertEqual(a , a )
self.assertEqual(a , len(a ) )
self.assertEqual(a , all_size_a + len(a ) )
lowerCAmelCase__ : Tuple = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=a )
self.assertGreaterEqual(len(a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.get_tokenizers(fast=a , do_lower_case=a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ : Optional[int] = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
lowerCAmelCase__ : List[Any] = tokenizer.convert_tokens_to_string(a )
self.assertIsInstance(output['text'] , a )
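# A minimal sketch (not the Wav2Vec2 implementation) of the char-offset logic the
# tests above rely on: collapse runs of repeated ids, drop the pad id, and record
# the [start, end) index range each surviving id covered in the raw sequence.
def ctc_char_offsets(ids, pad_id):
    offsets, start = [], 0
    for idx, current in enumerate(ids):
        if idx + 1 < len(ids) and ids[idx + 1] == current:
            continue  # still inside a run of repeats
        if current != pad_id:
            offsets.append({"char": current, "start_offset": start, "end_offset": idx + 1})
        start = idx + 1
    return offsets
if __name__ == "__main__":
    # [11, 5, 5, 15] with pad_id=0 -> 11 covers [0, 1), 5 covers [1, 3), 15 covers [3, 4)
    print(ctc_char_offsets([11, 5, 5, 15], pad_id=0))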
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
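# For orientation, a minimal sketch of the 8-neighbour local binary pattern idea
# exercised by test_local_binary_pattern (an assumed textbook variant, not
# necessarily the library's exact neighbour ordering): threshold each neighbour
# against the centre pixel and pack the comparison bits into one integer.
def lbp_value_sketch(window):
    center = window[1][1]
    neighbors = [window[0][0], window[0][1], window[0][2], window[1][2],
                 window[2][2], window[2][1], window[2][0], window[1][0]]
    return sum(int(p >= center) << k for k, p in enumerate(neighbors))
if __name__ == "__main__":
    # corners (9) are >= centre (5): bits 0, 2, 4, 6 -> 1 + 4 + 16 + 64 = 85
    print(lbp_value_sketch([[9, 1, 9], [1, 5, 1], [9, 1, 9]]))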
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[Any] , a : List[Any] , a : Union[str, Any]=7 , a : str=3 , a : Optional[Any]=18 , a : int=30 , a : int=400 , a : Dict=True , a : Optional[int]=None , a : Optional[Any]=True , a : Dict=None , a : List[str]=True , a : Tuple=[0.5, 0.5, 0.5] , a : List[str]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = size if size is not None else {'shortest_edge': 18}
lowerCAmelCase__ : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Union[str, Any] = num_channels
lowerCAmelCase__ : Optional[Any] = image_size
lowerCAmelCase__ : Optional[int] = min_resolution
lowerCAmelCase__ : str = max_resolution
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : List[str] = size
lowerCAmelCase__ : int = do_center_crop
lowerCAmelCase__ : Union[str, Any] = crop_size
lowerCAmelCase__ : Tuple = do_normalize
lowerCAmelCase__ : Optional[int] = image_mean
lowerCAmelCase__ : Union[str, Any] = image_std
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = LevitImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = LevitImageProcessingTester(self )
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'size' ) )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowerCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : Optional[int] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowerCAmelCase__ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : int = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
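# A small sketch of the size arithmetic the tests above assert on (assumption:
# standard shortest-edge semantics; this helper is illustrative, not the
# LevitImageProcessor code). Resizing to {"shortest_edge": s} scales the image
# so its short side equals s, after which the center crop extracts crop_size.
def shortest_edge_resize(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)
if __name__ == "__main__":
    print(shortest_edge_resize(30, 400, 18))  # (18, 240): short side pinned to 18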
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
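# A minimal sketch of the deferred-import idea behind _LazyModule, written with
# module-level __getattr__ (PEP 562). The mapping and names below are purely
# illustrative and are not the _LazyModule API.
import importlib
_lazy_demo = {"BlipConfig": "transformers.models.blip.configuration_blip"}
def __getattr__(name):
    if name in _lazy_demo:
        # resolve the attribute only on first access, keeping module import cheap
        return getattr(importlib.import_module(_lazy_demo[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")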
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
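# Quick cross-check (illustrative, not part of the original module): the
# backtracking above should agree with itertools.combinations for n=4, k=2,
# i.e. 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4.
if __name__ == "__main__":
    import itertools
    assert generate_all_combinations(4, 2) == [
        list(c) for c in itertools.combinations(range(1, 5), 2)
    ]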
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
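# Quick usage sketch for the schedule above: cosine betas start tiny and grow,
# with the final step capped at max_beta (printed values are approximate).
if __name__ == "__main__":
    demo_betas = betas_for_alpha_bar(1_000)
    print(demo_betas[0].item(), demo_betas[-1].item())  # ~4e-5 and 0.999 (capped)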
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
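# For reference, a generic Heun (improved Euler) step for dy/dt = f(t, y) — a
# standalone sketch of the two-phase update the `step` method above performs
# with sigmas: take an Euler step, then average the start and end derivatives.
def heun_step(f, t, y, dt):
    k1 = f(t, y)                   # slope at the current point (1st order phase)
    y_euler = y + dt * k1          # provisional Euler prediction
    k2 = f(t + dt, y_euler)        # slope at the predicted point (2nd order phase)
    return y + dt * (k1 + k2) / 2  # Heun update: mean of the two slopes
if __name__ == "__main__":
    # dy/dt = -y from y(0) = 1 over dt = 0.1: exact value exp(-0.1) ≈ 0.9048
    print(heun_step(lambda t, y: -y, 0.0, 1.0, 0.1))  # 0.905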
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
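# A quick numeric check (illustrative, not part of the original module): for a
# 4x4 input, size=2 and stride=2 tile the matrix into four blocks, so both
# pooling modes return a 2x2 output.
if __name__ == "__main__":
    demo = np.arange(16).reshape(4, 4)
    print(maxpooling(demo, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
    print(avgpooling(demo, size=2, stride=2))  # [[ 2.  4.] [10. 12.]]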
# Main Function
if __name__ == "__main__":
    from doctest import testmod
    testmod(name="avgpooling", verbose=True)
    # Loading the image
    image = Image.open("path_to_image")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
        out_1 = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
        out_1 = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
            after_outputs = model_a(**a )
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
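# A shape-only sketch of the contrastive logits the slow test above checks
# (random embeddings, not the model; 512 is assumed as a CLIP-typical
# projection_dim): logits_per_image is (n_images, n_texts) and logits_per_text
# is its transpose, which is exactly what the two assertEqual calls verify.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image_embeds = rng.normal(size=(1, 512))  # one image
    text_embeds = rng.normal(size=(2, 512))   # two candidate captions
    logits_per_image = image_embeds @ text_embeds.T  # (1, 2), up to logit_scale
    print(logits_per_image.shape, logits_per_image.T.shape)  # (1, 2) (2, 1)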
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableDiffusionDiffEditPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase = frozenset([] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
lowerCAmelCase__ : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
lowerCAmelCase__ : str = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
torch.manual_seed(0 )
lowerCAmelCase__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
lowerCAmelCase__ : str = CLIPTextModel(a )
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self : Any , a : Tuple , a : Optional[int]=0 ):
'''simple docstring'''
lowerCAmelCase__ : Any = floats_tensor((1, 16, 16) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Tuple = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Any = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : Dict , a : str , a : Dict=0 ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Union[str, Any] = Image.fromarray(np.uinta(a ) ).convert('RGB' )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Tuple = torch.manual_seed(a )
else:
lowerCAmelCase__ : Any = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : List[Any] = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : Any , a : Dict , a : List[Any]=0 ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Dict = Image.fromarray(np.uinta(a ) ).convert('RGB' )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Tuple = torch.manual_seed(a )
else:
lowerCAmelCase__ : int = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : List[str] = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
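    # Setting every optional component to None must survive a save_pretrained/from_pretrained
    # round trip, with outputs matching to within 1e-4.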
def _lowerCamelCase ( self : str ):
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
lowerCAmelCase__ : str = self.get_dummy_components()
lowerCAmelCase__ : Union[str, Any] = self.pipeline_class(**a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a , a , a )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCAmelCase__ : Any = self.get_dummy_inputs(a )
lowerCAmelCase__ : Any = pipe(**a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a )
lowerCAmelCase__ : Tuple = self.pipeline_class.from_pretrained(a )
pipe_loaded.to(a )
pipe_loaded.set_progress_bar_config(disable=a )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a , a ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a )
lowerCAmelCase__ : Optional[Any] = pipe_loaded(**a )[0]
lowerCAmelCase__ : Dict = np.abs(output - output_loaded ).max()
self.assertLess(a , 1E-4 )
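    # generate_mask() should return a (1, 16, 16) mask whose checked corner region is all zeros.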
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_mask_inputs(a )
lowerCAmelCase__ : Tuple = pipe.generate_mask(**a )
lowerCAmelCase__ : str = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCAmelCase__ : int = np.array([0] * 9 )
lowerCAmelCase__ : int = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
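    # Inversion with the pipeline's default DDIM scheduler pair should reproduce the reference slice.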
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : str = self.pipeline_class(**a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inversion_inputs(a )
lowerCAmelCase__ : Tuple = pipe.invert(**a ).images
lowerCAmelCase__ : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCAmelCase__ : Optional[int] = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowerCAmelCase__ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a , 1E-3 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
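    # Same inversion check, but with the DPMSolverMultistep scheduler pair swapped in.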
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = 'cpu'
lowerCAmelCase__ : List[str] = self.get_dummy_components()
lowerCAmelCase__ : Dict = {'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'beta_schedule': 'scaled_linear'}
lowerCAmelCase__ : List[str] = DPMSolverMultistepScheduler(**a )
lowerCAmelCase__ : List[Any] = DPMSolverMultistepInverseScheduler(**a )
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : int = self.get_dummy_inversion_inputs(a )
lowerCAmelCase__ : List[str] = pipe.invert(**a ).images
lowerCAmelCase__ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCAmelCase__ : List[Any] = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowerCAmelCase__ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a , 1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowerCamelCase ( cls : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
lowerCAmelCase__ : str = raw_image.convert('RGB' ).resize((768, 768) )
lowerCAmelCase__ : int = raw_image
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.float16 )
lowerCAmelCase__ : int = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase__ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = 'a bowl of fruit'
lowerCAmelCase__ : Dict = 'a bowl of pears'
lowerCAmelCase__ : str = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
lowerCAmelCase__ : Optional[Any] = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a ).latents
lowerCAmelCase__ : Optional[Any] = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
lowerCAmelCase__ : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.float16 )
lowerCAmelCase__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase__ : Tuple = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = 'a bowl of fruit'
lowerCAmelCase__ : List[Any] = 'a bowl of pears'
lowerCAmelCase__ : Any = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
lowerCAmelCase__ : Dict = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
lowerCAmelCase__ : Dict = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
lowerCAmelCase__ : Union[str, Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 307
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
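# Count correct predictions: compare the argmax over the class axis with the labels.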
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
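# Parse the RocStories csv into (story, continuation 1, continuation 2, zero-based label) tuples.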
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
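# Build (input_ids, mc_token_ids, lm_labels, mc_labels) tensors, with one
# [start] story [delimiter] continuation [clf] row per answer choice.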
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.int64 )
        lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
lowerCAmelCase__ : Optional[Any] = with_conta
lowerCAmelCase__ : List[str] = with_conta
lowerCAmelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = with_conta
lowerCAmelCase__ : Optional[int] = with_conta
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
    lowerCAmelCase__ : Tuple = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 307
| 1
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = """Hello, World!"""
lowerCamelCase__ = """en_XX"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
lowerCAmelCase__ : Optional[int] = Path('data_bin' )
lowerCAmelCase__ : str = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE_ ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE_ ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE_ ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE_ ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = xmod.model.encoder.sentence_encoder
lowerCAmelCase__ : Any = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCAmelCase__ : Optional[Any] = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = XmodForSequenceClassification(SCREAMING_SNAKE_CASE_ ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase__ : str = xmod_sent_encoder.embed_tokens.weight
lowerCAmelCase__ : List[Any] = xmod_sent_encoder.embed_positions.weight
lowerCAmelCase__ : Dict = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCAmelCase__ : List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCAmelCase__ : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase__ : int = model.roberta.encoder.layer[i]
lowerCAmelCase__ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCAmelCase__ : Any = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
lowerCAmelCase__ : Tuple = xmod_layer.self_attn.q_proj.weight
lowerCAmelCase__ : List[str] = xmod_layer.self_attn.q_proj.bias
lowerCAmelCase__ : Tuple = xmod_layer.self_attn.k_proj.weight
lowerCAmelCase__ : int = xmod_layer.self_attn.k_proj.bias
lowerCAmelCase__ : str = xmod_layer.self_attn.v_proj.weight
lowerCAmelCase__ : Union[str, Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase__ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
lowerCAmelCase__ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCAmelCase__ : List[str] = xmod_layer.self_attn.out_proj.bias
lowerCAmelCase__ : Optional[Any] = xmod_layer.self_attn_layer_norm.weight
lowerCAmelCase__ : List[str] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCAmelCase__ : str = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        lowerCAmelCase__ : List[str] = xmod_layer.fc1.weight
        lowerCAmelCase__ : Optional[int] = xmod_layer.fc1.bias
# output
lowerCAmelCase__ : List[Any] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        lowerCAmelCase__ : Union[str, Any] = xmod_layer.fc2.weight
        lowerCAmelCase__ : List[str] = xmod_layer.fc2.bias
lowerCAmelCase__ : List[Any] = xmod_layer.final_layer_norm.weight
lowerCAmelCase__ : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCAmelCase__ : List[Any] = xmod_layer.adapter_layer_norm.weight
lowerCAmelCase__ : Any = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCAmelCase__ : List[str] = bert_output.adapter_modules[lang_code]
lowerCAmelCase__ : int = xmod_layer.adapter_modules[lang_code]
            lowerCAmelCase__ : str = from_adapter.fc1.weight
            lowerCAmelCase__ : int = from_adapter.fc1.bias
            lowerCAmelCase__ : str = from_adapter.fc2.weight
            lowerCAmelCase__ : List[str] = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCAmelCase__ : Any = xmod_sent_encoder.layer_norm.weight
lowerCAmelCase__ : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCAmelCase__ : str = xmod.model.classification_heads['mnli'].dense.weight
lowerCAmelCase__ : Any = xmod.model.classification_heads['mnli'].dense.bias
lowerCAmelCase__ : Union[str, Any] = xmod.model.classification_heads['mnli'].out_proj.weight
lowerCAmelCase__ : Tuple = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCAmelCase__ : str = xmod.model.encoder.lm_head.dense.weight
lowerCAmelCase__ : List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCAmelCase__ : Any = xmod.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase__ : str = xmod.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase__ : Dict = xmod.model.encoder.lm_head.weight
lowerCAmelCase__ : str = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase__ : int = xmod.encode(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = model(SCREAMING_SNAKE_CASE_ )[0]
if classification_head:
lowerCAmelCase__ : Union[str, Any] = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE_ ) )
else:
lowerCAmelCase__ : List[str] = xmod.model(SCREAMING_SNAKE_CASE_ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCAmelCase__ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowerCAmelCase__ : int = torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCamelCase__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 307
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
| 307
| 1
|
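# Convert a snake_case string to camelCase, or to PascalCase when the flag is True.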
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = F'''Expected string as input, found {type(SCREAMING_SNAKE_CASE_ )}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = F'''Expected boolean as use_pascal parameter, found {type(SCREAMING_SNAKE_CASE_ )}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = input_str.split('_' )
lowerCAmelCase__ : Any = 0 if use_pascal else 1
lowerCAmelCase__ : Union[str, Any] = words[start_index:]
lowerCAmelCase__ : List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
lowerCAmelCase__ : Optional[int] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 307
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
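# Tests for DPMSolverSDEScheduler: configuration sweeps plus full denoising loops checked
# against device-specific reference sums and means (mps, cuda, cpu).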
@require_torchsde
class A__ ( SchedulerCommonTest ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 307
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=" " ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]
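# Explode each (title, text) document into one row per passage, repeating the title.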
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE_ ):
titles.append(title if title is not None else '' )
texts.append(SCREAMING_SNAKE_CASE_ )
return {"title": titles, "text": texts}
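# Embed the passages with the DPR context encoder and return the pooled outputs as float arrays.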
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ : Union[str, Any] = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )['input_ids']
lowerCAmelCase__ : Union[str, Any] = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase__ : Optional[int] = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase__ : List[Any] = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase__ : Any = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase__ : Union[str, Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase__ : Any = dataset.map(
partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) , batched=SCREAMING_SNAKE_CASE_ , batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE_ , )
# And finally save your dataset
lowerCAmelCase__ : Optional[int] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE_ )
# And save the index
lowerCAmelCase__ : str = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A__ :
lowercase = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowercase = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowercase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowercase = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class A__ :
lowercase = field(
        default=None , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowercase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class A__ :
lowercase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowercase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 307
|
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowerCamelCase__ = KEYMAP["""up"""]
lowerCamelCase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCamelCase__ = ord(str(i))
def lowerCAmelCase__ ( ) -> Dict:
if os.name == "nt":
import msvcrt
lowerCAmelCase__ : Dict = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE_ ) == 0:
# Read the keystroke
lowerCAmelCase__ : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ : Dict = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ : Dict = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ )
if ord(SCREAMING_SNAKE_CASE_ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase__ : Dict = chr(KEYMAP['esc'] )
except KeyError:
lowerCAmelCase__ : Dict = cha[1]
else:
lowerCAmelCase__ : List[Any] = ch.decode(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase__ : Tuple = sys.stdin.fileno()
lowerCAmelCase__ : Any = termios.tcgetattr(SCREAMING_SNAKE_CASE_ )
try:
tty.setraw(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ )
return ch
def lowerCAmelCase__ ( ) -> Union[str, Any]:
lowerCAmelCase__ : Any = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["esc"]:
lowerCAmelCase__ : Union[str, Any] = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["mod_int"]:
lowerCAmelCase__ : str = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE_ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 307
| 1
|
import re
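# Validate a Sri Lankan mobile number: a 0/94/+94/0094 prefix, a 7x operator code,
# an optional separator, then seven digits.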
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
lowerCAmelCase__ : Any = re.compile(
r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
return bool(re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
lowerCamelCase__ = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
| 307
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307
| 1
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCamelCase__ = get_logger(__name__)
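# Save an FSDP-wrapped model according to the configured state-dict type: one file for full
# state dicts, one file per rank for local ones, or a sharded checkpoint directory.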
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Optional[Any]:
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowerCAmelCase__ : int = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowerCAmelCase__ : Tuple = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowerCAmelCase__ : Dict = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowerCAmelCase__ : Any = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowerCAmelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowerCAmelCase__ : int = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F'''Saving model to {ckpt_dir}''' )
lowerCAmelCase__ : int = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
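# Mirror of the save path above: load model weights back for the configured FSDP state-dict type.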
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
lowerCAmelCase__ : List[str] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowerCAmelCase__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Loading model from {input_model_file}''' )
lowerCAmelCase__ : str = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowerCAmelCase__ : Any = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowerCAmelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Loading model from {input_model_file}''' )
lowerCAmelCase__ : Tuple = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowerCAmelCase__ : Union[str, Any] = (
os.path.join(SCREAMING_SNAKE_CASE_ , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
lowerCAmelCase__ : List[str] = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , )
lowerCAmelCase__ : List[str] = state_dict['model']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
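# Save the FSDP optimizer state: a single file written by rank 0 for full state dicts,
# a sharded checkpoint directory otherwise.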
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Optional[int]:
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowerCAmelCase__ : List[Any] = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowerCAmelCase__ : Tuple = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowerCAmelCase__ : str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
lowerCAmelCase__ : Any = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
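# Load the optimizer state and map it onto the wrapped optimizer via FSDP.optim_state_dict_to_load.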
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but it is currently broken (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if OPTIMIZER_NAME not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
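# A minimal usage sketch (hypothetical setup; `fsdp_plugin`, `accelerator`, `model` and
# `optimizer` are assumed to come from an already-prepared Accelerate FSDP run):
#
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)
#   ...training continues, or a new process starts...
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)
#
# With FULL_STATE_DICT the state round-trips through a single f"{OPTIMIZER_NAME}.bin" file
# written by rank 0; with a sharded state dict it round-trips through a per-rank checkpoint
# directory handled by torch.distributed.checkpoint.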
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for state in total_list:
        print(*state)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
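# Example: generate_all_combinations(4, 2) enumerates every 2-element combination of
# {1, 2, 3, 4} in lexicographic order, so running this script prints:
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4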
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to mark arrow-key codes, keeping them out of the byte range
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
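# A minimal interactive sketch (assumes a real terminal; this loop is illustrative and not
# part of the original module):
#
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):  # Ctrl-C
#           break
#       if isinstance(key, str) and ord(key) == KEYMAP["up"]:
#           print("up arrow")
#
# Arrow keys come back as chr(KEYMAP["up"]) and friends, i.e. with ARROW_KEY_FLAG (1 << 8)
# added, which keeps them distinct from every single-byte printable character.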
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self : List[Any] , a : Optional[int] , a : Tuple=13 , a : Union[str, Any]=7 , a : str=True , a : Optional[Any]=True , a : int=True , a : int=True , a : Tuple=99 , a : List[str]=64 , a : str=32 , a : Optional[Any]=5 , a : List[Any]=4 , a : Tuple=37 , a : str="gelu" , a : Union[str, Any]=0.1 , a : Dict=0.1 , a : List[Any]=512 , a : Optional[int]=16 , a : Optional[Any]=2 , a : Dict=0.0_2 , a : int=3 , a : Optional[Any]=4 , a : Union[str, Any]=None , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : int = seq_length
lowerCAmelCase__ : Union[str, Any] = is_training
lowerCAmelCase__ : Optional[Any] = use_input_mask
lowerCAmelCase__ : List[str] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : str = embedding_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : Union[str, Any] = type_vocab_size
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : int = None
if self.use_token_type_ids:
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Any , a : str , a : Any , a : Union[str, Any] , a : Any , a : List[str] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = MobileBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , token_type_ids=a )
lowerCAmelCase__ : Any = model(a , token_type_ids=a )
lowerCAmelCase__ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : Optional[Any] , a : Any , a : List[str] , a : Optional[Any] , a : List[str] , a : List[str] , a : List[Any] , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = MobileBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Tuple , a : Optional[Any] , a : Tuple , a : Optional[int] , a : Dict , a : List[Any] , a : List[Any] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = MobileBertForNextSentencePrediction(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : str , a : str , a : Union[str, Any] , a : List[Any] , a : List[str] , a : Any , a : Any , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : str = MobileBertForPreTraining(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(
a , attention_mask=a , token_type_ids=a , labels=a , next_sentence_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : Any , a : int , a : List[str] , a : str , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = MobileBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : List[str] , a : List[str] , a : List[str] , a : List[Any] , a : Dict , a : List[Any] , a : Tuple , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : int = MobileBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : str , a : Any , a : List[Any] , a : Optional[int] , a : List[Any] , a : Optional[int] , a : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.num_labels
lowerCAmelCase__ : Any = MobileBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : Any , a : Union[str, Any] , a : Optional[int] , a : Any , a : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.num_choices
lowerCAmelCase__ : List[Any] = MobileBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : List[str] = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _lowerCamelCase ( self : Optional[Any] , a : Dict , a : Optional[Any] , a : str=False ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class in get_values(a ):
lowerCAmelCase__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a )
lowerCAmelCase__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = MobileBertModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*a )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(a )
lowerCAmelCase__ : Any = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(a )[0]
lowerCAmelCase__ : Dict = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : Any = torch.tensor(
[
[
[-2.4736526E07, 8.2691656E04, 1.6521838E05],
[-5.7541704E-01, 3.9056022E00, 4.4011507E00],
[2.6047359E00, 1.5677652E00, -1.7324188E-01],
]
] , device=a , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
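        # Worked illustration (hypothetical numbers): with TOLERANCE = 1e-3, an expected entry of
        # ~1e8 is accepted anywhere in roughly [0.999e8, 1.001e8] by the ratio test, an absolute
        # slack of ~1e5 that scales with the magnitude; a single absolute tolerance would be far
        # too loose for the ~1e0 entries or would reject the ~1e8 ones.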
lowerCAmelCase__ : List[Any] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCAmelCase__ : Optional[Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Find the prime below `ceiling` writable as the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
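# Quick sanity check: for ceiling=100 the best run is 2 + 3 + 5 + 7 + 11 + 13 = 41
# (six consecutive primes), so solution(100) == 41, matching the example in
# Project Euler problem 50.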
if __name__ == "__main__":
print(F"""{solution() = }""")
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
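# Stooge sort is a deliberately inefficient teaching algorithm: three overlapping recursive
# calls on 2/3 of the range give the recurrence T(n) = 3 * T(2n / 3) + O(1), i.e. a running
# time of O(n^(log 3 / log 1.5)) ~= O(n^2.71), asymptotically worse even than bubble sort.
# Quick check: stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].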
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCamelCase__ = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model computing y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Dict = DummyModel()
lowerCAmelCase__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = dummy_dataloaders()
lowerCAmelCase__ : Tuple = ProjectConfiguration(total_limit=1 , project_dir=a , automatic_checkpoint_naming=a )
# Train baseline
lowerCAmelCase__ : Optional[int] = Accelerator(project_config=a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare(
a , a , a , a )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : str = DummyModel()
lowerCAmelCase__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ , lowerCAmelCase__ : int = dummy_dataloaders()
# Train baseline
lowerCAmelCase__ : Dict = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = accelerator.prepare(
a , a , a , a )
# Save initial
lowerCAmelCase__ : Dict = os.path.join(a , 'initial' )
accelerator.save_state(a )
((lowerCAmelCase__) , (lowerCAmelCase__)) : str = model.a.item(), model.b.item()
lowerCAmelCase__ : Any = optimizer.state_dict()
lowerCAmelCase__ : str = train(3 , a , a , a , a )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Union[str, Any] = model.a.item(), model.b.item()
lowerCAmelCase__ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCAmelCase__ : Any = DummyModel()
lowerCAmelCase__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = dummy_dataloaders()
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.prepare(
a , a , a , a )
accelerator.load_state(a )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Any = model.a.item(), model.b.item()
lowerCAmelCase__ : Tuple = optimizer.state_dict()
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
lowerCAmelCase__ : Optional[Any] = train(2 , a , a , a , a )
# Save everything
lowerCAmelCase__ : Optional[Any] = os.path.join(a , 'checkpoint' )
accelerator.save_state(a )
# Load everything back in and make sure all states work
accelerator.load_state(a )
test_rands += train(1 , a , a , a , a )
((lowerCAmelCase__) , (lowerCAmelCase__)) : int = model.a.item(), model.b.item()
lowerCAmelCase__ : Optional[Any] = optimizer.state_dict()
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Any = DummyModel()
lowerCAmelCase__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ , lowerCAmelCase__ : int = dummy_dataloaders()
lowerCAmelCase__ : str = ProjectConfiguration(automatic_checkpoint_naming=a )
# Train baseline
lowerCAmelCase__ : int = Accelerator(project_dir=a , project_config=a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = accelerator.prepare(
a , a , a , a )
# Save initial
accelerator.save_state()
((lowerCAmelCase__) , (lowerCAmelCase__)) : Optional[int] = model.a.item(), model.b.item()
lowerCAmelCase__ : Dict = optimizer.state_dict()
lowerCAmelCase__ : Tuple = train(3 , a , a , a , a )
((lowerCAmelCase__) , (lowerCAmelCase__)) : str = model.a.item(), model.b.item()
lowerCAmelCase__ : Optional[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCAmelCase__ : Tuple = DummyModel()
lowerCAmelCase__ : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = dummy_dataloaders()
lowerCAmelCase__ : str = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=a )
lowerCAmelCase__ : str = Accelerator(project_dir=a , project_config=a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare(
a , a , a , a )
accelerator.load_state(os.path.join(a , 'checkpoints' , 'checkpoint_0' ) )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Any = model.a.item(), model.b.item()
lowerCAmelCase__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
lowerCAmelCase__ : Optional[int] = train(2 , a , a , a , a )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(a , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , a , a , a , a )
((lowerCAmelCase__) , (lowerCAmelCase__)) : str = model.a.item(), model.b.item()
lowerCAmelCase__ : Tuple = optimizer.state_dict()
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = torch.tensor([1, 2, 3] )
lowerCAmelCase__ : Optional[int] = torch.tensor([2, 3, 4] )
lowerCAmelCase__ : Union[str, Any] = DummyModel()
lowerCAmelCase__ : Dict = torch.optim.Adam(net.parameters() )
lowerCAmelCase__ : List[Any] = Accelerator()
with self.assertRaises(a ) as ve:
accelerator.register_for_checkpointing(a , a , a , a )
lowerCAmelCase__ : List[Any] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Optional[int] = DummyModel()
lowerCAmelCase__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ : Any = torch.optim.lr_scheduler.StepLR(a , step_size=1 , gamma=0.9_9 )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = dummy_dataloaders()
lowerCAmelCase__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=a )
# Train baseline
lowerCAmelCase__ : int = Accelerator(project_dir=a , project_config=a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = accelerator.prepare(
a , a , a , a , a )
# Save initial
accelerator.save_state()
lowerCAmelCase__ : Tuple = scheduler.state_dict()
train(3 , a , a , a , a , a )
self.assertNotEqual(a , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(a , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(a , scheduler.state_dict() )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Any = DummyModel()
lowerCAmelCase__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=a , total_limit=2 )
# Train baseline
lowerCAmelCase__ : Optional[int] = Accelerator(project_dir=a , project_config=a )
lowerCAmelCase__ : Optional[Any] = accelerator.prepare(a )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(a , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(a , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(a , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(a , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase__ = """/tmp/accelerate/state_checkpointing"""
lowerCamelCase__ = DummyModel()
lowerCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
lowerCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCamelCase__ , lowerCamelCase__ = dummy_dataloaders()
lowerCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCamelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
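# When run directly, the __main__ block above is meant to be launched under multiple
# processes. A plausible invocation (file name hypothetical, mirroring the torchrun command
# built in the @require_cuda test above):
#   torchrun --nproc_per_node=2 test_state_checkpointing.py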
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
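# The loop above implements the modified Newton-Raphson step for a root of multiplicity m:
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# Worked check: for f(x) = x**2 - 4 starting at x = 3 with m = 1, the first iterate is
# 3 - 5 / 6 = 2.1666..., after which the iteration converges quadratically to the root 2.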
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 307
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
    def test_flatten_dict(self):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
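# Note (added): transpose/reshape/squeeze/expand_dims in transformers.utils are thin
# framework dispatchers over NumPy, PyTorch, TensorFlow and JAX arrays, which is why
# each test above checks the wrapped call against the reference NumPy behaviour for
# every supported array type.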
| 307
| 1
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )

        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f'''output_slice: {output_slice}''')

        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
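# Note (added): the replicate/shard/jit combination above is the standard Flax data
# parallelism recipe — the weights are replicated once per device while inputs and
# RNG keys get a leading device axis, so `jit=True` runs the pmapped forward pass.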
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Backwards compatibility: re-export the download classes onto their deprecated module paths.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 307
| 1
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Union[str, Any] , a : Any , a : Dict , a : int , a : Union[str, Any] , a : Any , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = DistilBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : List[Any] , a : Tuple , a : Union[str, Any] , a : Optional[int] , a : Any , a : List[str] , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : str , a : List[Any] , a : Any , a : int , a : Optional[Any] , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : int = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Optional[int] , a : Dict , a : Any , a : List[Any] , a : str , a : str , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.num_labels
lowerCAmelCase__ : Optional[Any] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Any , a : str , a : str , a : Any , a : Optional[Any] , a : int , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.num_choices
lowerCAmelCase__ : Tuple = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Dict = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))
@require_torch
class A__ ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 307
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type='fourier',
            mid_block_type='UNetMidBlock1D',
            down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'),
            up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'),
        )
        scheduler = IPNDMScheduler()

        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components['unet'].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])

        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])

        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
| 307
| 1
|
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        # Day `index` must be covered: take the cheapest of a 1-, 7- or 30-day pass.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
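    # Added, illustrative check (LeetCode 983's first example): with 1/7/30-day
    # passes costing 2/7/15 and travel days [1, 4, 6, 7, 8, 20], the cheapest
    # cover costs 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11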
| 307
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
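# Note (added): this mirrors diffusers' dummy-object pattern — each placeholder stands
# in for a real ONNX pipeline class, and both its constructor and its classmethods
# (in the real file, `from_config` and `from_pretrained`) immediately raise a clear
# "missing backend" error via `requires_backends` instead of an ImportError at import time.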
| 307
| 1
|
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Strip the smallest remaining factor; the last one stripped is the largest prime.
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 307
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class A__ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is curently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
| 307
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices [i, j] with nums[i] + nums[j] == target, assuming nums is sorted ascending."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 307
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = 'unispeech'
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1E-5,
        feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, replace_prob=0.5, **kwargs
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
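# The property above multiplies the conv_stride entries together; with the
# default strides (5, 2, 2, 2, 2, 2, 2) each output frame of the feature
# encoder covers 5 * 2**6 = 320 raw audio samples (the usual 20 ms hop at
# 16 kHz). Upstream this property is called `inputs_to_logits_ratio`; it
# assumes `functools` and `operator` are imported above this excerpt.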
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length, remainder, digits, length) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length)
    return result
def solution(max_power = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
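    # Hedged sanity check, not in the original script: per the Project Euler
    # 145 statement there are 120 reversible numbers below one thousand.
    assert solution(3) == 120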
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
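    # In effect this helper computes hidden @ proj @ weight.T + bias when a
    # projection is present; the two chained nn.functional.linear calls above
    # are the old-CUDA-safe equivalent of the einsum kept in the comment.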
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
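# Minimal usage sketch (names assumed, not from this file): the class above
# mirrors Transformer-XL's ProjectedAdaptiveLogSoftmax, so something like
#   crit = A__(n_token=10000, d_embed=512, d_proj=512, cutoffs=[1000, 4000])
#   neg_logprob = crit(hidden, labels)   # hidden: (bsz, seq, d_proj)
# would return per-token negative log-probabilities during training.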
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"""\s+""")
def get_hash(example) -> Dict:
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}
def line_stats(example) -> Dict:
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example) -> Dict:
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques) -> bool:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def is_autogenerated(example, scan_width=5) -> Dict:
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05) -> Dict:
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example) -> Dict:
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4) -> Dict:
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example) -> Dict:
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example) -> Dict:
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args) -> bool:
    if not check_uniques(example, uniques):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path) -> None:
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique("""hash"""))
frac = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(F"""Size of deduplicated dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
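# Example invocation (flag names assumed from PreprocessingArguments; the
# script path is hypothetical):
#   python preprocessing.py --dataset_name=transformersbook/codeparrot \
#       --output_dir=codeparrot-clean --tokenizer_dir=gpt2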
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
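# This TableFormat draws no horizontal rules and renders each row as
# pipe-separated cells, which keeps the tabulate output compact enough to
# fit inside Slack code blocks.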
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
with open(log, """r""") as f:
for line in f:
            line = json.loads(line)
if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
if line.get("""duration""", None) is not None:
                    duration = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = """"""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
    err = """Too many failed tests, please see the full report in the Action results."""
    offset = len(err) + 10
    message = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
    message = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
    date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
    ts = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
                    row[0] = """"""
            payload = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class A__ ( PretrainedConfig ):
lowercase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowercase = 'nezha'
def __init__( self : Union[str, Any] , a : Tuple=21_128 , a : int=768 , a : Dict=12 , a : Dict=12 , a : Optional[Any]=3_072 , a : List[Any]="gelu" , a : Optional[Any]=0.1 , a : int=0.1 , a : str=512 , a : int=64 , a : int=2 , a : List[str]=0.0_2 , a : Tuple=1E-12 , a : int=0.1 , a : Union[str, Any]=0 , a : Optional[Any]=2 , a : Tuple=3 , a : Union[str, Any]=True , **a : Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowerCAmelCase__ : Optional[int] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Dict = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : str = max_relative_position
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : Any = classifier_dropout
lowerCAmelCase__ : Optional[Any] = use_cache
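# Minimal usage sketch (the class is NezhaConfig upstream; here it keeps the
# file's obfuscated name A__):
#   config = A__(max_relative_position=64)
#   assert config.hidden_size == 768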
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast() -> None:
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')
def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny() -> None:
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter() -> None:
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter() -> None:
    assert med.median_filter(gray, 3).any()
def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia() -> None:
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg") -> None:
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg") -> None:
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern() -> None:
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
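# The functions above are plain pytest-style tests; assuming the repository
# layout implied by the image paths, they can be run with:
#   pytest digital_image_processing/ -q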
def binary_insertion_sort(collection) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(binary_insertion_sort(unsorted))
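    # Illustrative check, not in the original script:
    assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]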
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_blip"""] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
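# With _LazyModule, submodules such as modeling_blip are imported only on
# first attribute access, which keeps `import transformers` itself cheap.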
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
BLIP_TEST_FILE = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = get_test_to_tester_mapping(a )
lowerCAmelCase__ : Any = get_test_to_tester_mapping(a )
lowerCAmelCase__ : Optional[Any] = {'BertModelTest': 'BertModelTester'}
lowerCAmelCase__ : List[str] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(a ) , a )
self.assertEqual(get_test_info.to_json(a ) , a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : str = get_model_to_test_mapping(a )
lowerCAmelCase__ : Dict = get_model_to_test_mapping(a )
lowerCAmelCase__ : str = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowerCAmelCase__ : List[str] = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(a ) , a )
self.assertEqual(get_test_info.to_json(a ) , a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = get_model_to_tester_mapping(a )
lowerCAmelCase__ : Union[str, Any] = get_model_to_tester_mapping(a )
lowerCAmelCase__ : Union[str, Any] = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowerCAmelCase__ : List[str] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(a ) , a )
self.assertEqual(get_test_info.to_json(a ) , a )
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
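# Each beta_t above is 1 - alpha_bar(t2) / alpha_bar(t1) clipped at max_beta,
# so the cumulative product of (1 - beta) tracks the chosen alpha_bar curve.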
class A__ ( SchedulerMixin , ConfigMixin ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
            lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.float32 )
elif beta_schedule == "linear":
            lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
        lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
            lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.float32 )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
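    # Heun is a two-stage predictor-corrector method: `dt` is only populated
    # after the first-order stage, so `self.dt is None` marks the predictor step.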
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
            lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
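# Minimal usage sketch (the class mirrors diffusers' HeunDiscreteScheduler;
# the method names are obfuscated above, upstream they are set_timesteps/step):
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25)
#   prev = scheduler.step(model_output, t, sample).prev_sample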
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
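# The stack holds a decreasing run of candidates seen to the right of the
# current index; every element is pushed and popped at most once, so this
# variant runs in O(n) versus the O(n^2) scans above.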
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x) -> tuple:
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
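# A minimal sketch of the update rule implemented by __judge_point above
# (assumed 3x3 neighbourhood; the focus cell is removed from its own count):
#
#   __judge_point(True,  [[True, True, False], [False, True, False], [False, False, False]])   # -> True  (2 live neighbours: survives)
#   __judge_point(True,  [[False, False, False], [False, True, False], [False, False, False]]) # -> False (0 live neighbours: dies)
#   __judge_point(False, [[True, True, False], [True, False, False], [False, False, False]])   # -> True  (3 live neighbours: born)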
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
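# Shape sketch (hypothetical sizes): for a dataset of 1497 examples capped at
# input_len = 78, pre_process_datasets returns, per dataset:
#   input_ids    (1497, 2, 78)  - story + each of the two continuation candidates
#   mc_token_ids (1497, 2)      - position of the classification token per candidate
#   lm_labels    (1497, 2, 78)  - same tokens as input_ids, -100 on padding positions
#   mc_labels    (1497,)        - index (0 or 1) of the correct continuation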
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
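# A typical invocation sketch (script and dataset file names assumed; the
# ROCStories cloze-test CSVs must be downloaded separately):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset cloze_test_val__spring2016.csv \
#     --eval_dataset cloze_test_test__spring2016.csv \
#     --output_dir ../log \
#     --train_batch_size 16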
if __name__ == "__main__":
main()
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = {'input_ids': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)
    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
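# A minimal usage sketch (hypothetical call sites): modules that need a runtime
# dependency check can call, e.g.:
#
#   dep_version_check("tqdm")                        # version pinned in dependency_versions_table
#   dep_version_check("numpy", "pip install numpy")  # the optional hint is shown on failure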
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
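    # A small worked example (assumed values): with voltage = 10 V and
    # resistance = 5 ohm, passing current=0 asks ohms_law to solve
    # I = V / R = 2.0 A.
    print(ohms_law(voltage=10, current=0, resistance=5))  # -> {'current': 2.0}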
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
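# A minimal usage sketch (checkpoint name assumed; any BLOOM checkpoint that
# ships a tokenizer.json works):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world").input_ids
#
# For chat-style inputs, _build_conversation_input_ids appends eos_token_id
# after each turn and keeps only the trailing model_max_length tokens.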
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
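# A minimal interactive sketch (run in a real terminal; not executed on import):
#
#   if __name__ == "__main__":
#       key = get_character()
#       print("got:", repr(key))
#
# Arrow keys come back as their base code plus ARROW_KEY_FLAG (1 << 8), so a
# caller can distinguish them from a printable character with the same ordinal.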
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
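    # A small round-trip example using the functions above:
    #   base16_encode(b"Hello")      -> '48656C6C6F'
    #   base16_decode('48656C6C6F')  -> b'Hello'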
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
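# Expected output sketch: with arr = [10, 20, 30, 40, 50] and r = 3 the driver
# above prints all C(5, 3) = 10 combinations in lexicographic order, starting:
#   10 20 30
#   10 20 40
#   10 20 50
#   10 30 40
#   ...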
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
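# Expected output sketch: for n = 4, k = 2 the driver prints the C(4, 2) = 6
# combinations [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4], one per line.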
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
SPIECE_UNDERLINE = "▁"
class TaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@staticmethod
def _lowerCamelCase ( a : Optional[Any] , a : str , a : List[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowerCAmelCase__ : Optional[int] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , a , )
return max_model_length
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a )) + [1]
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return list(
set(filter(lambda a : bool(re.search(R'<extra_id_\d+>' , a ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return [self._convert_token_to_id(a ) for token in self.get_sentinel_tokens()]
def _lowerCamelCase ( self : Optional[Any] , a : List[int] ):
'''simple docstring'''
if len(a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Any , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self._add_eos_if_not_present(a )
if token_ids_a is None:
return token_ids_a
else:
lowerCAmelCase__ : int = self._add_eos_if_not_present(a )
return token_ids_a + token_ids_a
def __getstate__( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.__dict__.copy()
lowerCAmelCase__ : Union[str, Any] = None
return state
def __setstate__( self : str , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : List[str] , a : "TextInput" , **a : int ):
'''simple docstring'''
if not self.legacy:
lowerCAmelCase__ : Any = SPIECE_UNDERLINE + text.replace(a , ' ' )
return super().tokenize(a , **a )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , **a : Tuple ):
'''simple docstring'''
if not self.legacy:
lowerCAmelCase__ : Dict = text.startswith(a )
if is_first:
lowerCAmelCase__ : Union[str, Any] = text[1:]
lowerCAmelCase__ : Tuple = self.sp_model.encode(a , out_type=a )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(a ):
lowerCAmelCase__ : Union[str, Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _lowerCamelCase ( self : List[Any] , a : List[str] ):
'''simple docstring'''
if token.startswith('<extra_id_' ):
lowerCAmelCase__ : Any = re.match(R'<extra_id_(\d+)>' , a )
lowerCAmelCase__ : Optional[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a )
def _lowerCamelCase ( self : List[str] , a : int ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowerCAmelCase__ : int = self.sp_model.IdToPiece(a )
else:
lowerCAmelCase__ : Optional[int] = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def _lowerCamelCase ( self : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : str = ''
lowerCAmelCase__ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[str] = []
else:
current_sub_tokens.append(a )
lowerCAmelCase__ : Tuple = False
out_string += self.sp_model.decode(a )
return out_string.strip()
def _lowerCamelCase ( self : Union[str, Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase__ : Optional[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
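# A minimal usage sketch, assuming this class is exported as T5Tokenizer as in
# the upstream transformers library (needs the `sentencepiece` package and a
# cached or downloadable `spiece.model` for the checkpoint):
from transformers import T5Tokenizer

t5_tok = T5Tokenizer.from_pretrained("t5-small")
enc = t5_tok("translate English to German: Hello world")
print(t5_tok.convert_ids_to_tokens(enc["input_ids"]))  # sentencepiece pieces plus the trailing </s>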
| 307
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
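# Outside the test harness, the save/load round-trip exercised above is just
# the following (a sketch, assuming the upstream transformers API):
from transformers import GenerationConfig

gen_cfg = GenerationConfig(do_sample=True, temperature=0.7)
gen_cfg.save_pretrained("./my-generation-config")  # writes generation_config.json
reloaded = GenerationConfig.from_pretrained("./my-generation-config")
assert reloaded.temperature == 0.7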
| 307
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = tempfile.mkdtemp()
lowerCAmelCase__ : Optional[int] = BlipImageProcessor()
lowerCAmelCase__ : Optional[int] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Optional[int] = InstructBlipProcessor(a , a , a )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : int , **a : Optional[int] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
def _lowerCamelCase ( self : Tuple , **a : List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _lowerCamelCase ( self : Union[str, Any] , **a : str ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).qformer_tokenizer
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ : Union[str, Any] = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase__ : Dict = self.get_image_processor(do_normalize=a , padding_value=1.0 )
lowerCAmelCase__ : Optional[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
self.assertIsInstance(processor.qformer_tokenizer , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_qformer_tokenizer()
lowerCAmelCase__ : Optional[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
lowerCAmelCase__ : Any = self.prepare_image_inputs()
lowerCAmelCase__ : List[str] = image_processor(a , return_tensors='np' )
lowerCAmelCase__ : Any = processor(images=a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.get_image_processor()
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = self.get_qformer_tokenizer()
lowerCAmelCase__ : Optional[int] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
lowerCAmelCase__ : Tuple = 'lower newer'
lowerCAmelCase__ : Any = processor(text=a )
lowerCAmelCase__ : List[str] = tokenizer(a , return_token_type_ids=a )
lowerCAmelCase__ : str = qformer_tokenizer(a , return_token_type_ids=a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.get_image_processor()
lowerCAmelCase__ : Optional[int] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = self.get_qformer_tokenizer()
lowerCAmelCase__ : Union[str, Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
lowerCAmelCase__ : Tuple = 'lower newer'
lowerCAmelCase__ : Dict = self.prepare_image_inputs()
lowerCAmelCase__ : List[str] = processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_qformer_tokenizer()
lowerCAmelCase__ : str = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
lowerCAmelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ : str = processor.batch_decode(a )
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : Optional[Any] = self.get_qformer_tokenizer()
lowerCAmelCase__ : Dict = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
lowerCAmelCase__ : List[str] = 'lower newer'
lowerCAmelCase__ : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase__ : str = processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
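# End-to-end, this processor bundles a tokenizer, an image processor, and a
# Q-Former tokenizer behind one call. A usage sketch, assuming the public
# InstructBLIP checkpoint name; "photo.jpg" is a hypothetical local file:
from PIL import Image
from transformers import InstructBlipProcessor

blip_processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.open("photo.jpg").convert("RGB")
inputs = blip_processor(images=image, text="What is unusual about this image?", return_tensors="pt")
print(sorted(inputs.keys()))  # input_ids, attention_mask, qformer_* ids/mask, pixel_values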
| 307
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class A__ :
lowercase = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 1
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = GPTSanJapaneseTokenizer
lowercase = False
lowercase = {'do_clean_text': False, 'add_prefix_space': False}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# fmt: off
lowerCAmelCase__ : int = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase__ : Tuple = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
lowerCAmelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(a ) )
def _lowerCamelCase ( self : Tuple , **a : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
lowerCAmelCase__ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def _lowerCamelCase ( self : Any , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.get_input_output_texts(a )
lowerCAmelCase__ : Tuple = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Any = tokenizer.decode(a , clean_up_tokenization_spaces=a )
return text, ids
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass # TODO add if relevant
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase__ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。'
lowerCAmelCase__ : List[Any] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
self.assertListEqual(a , a )
# Testing conversion to ids without special tokens
lowerCAmelCase__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase__ : Tuple = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , a )
# Testing conversion to ids with special tokens
lowerCAmelCase__ : Any = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase__ : str = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
lowerCAmelCase__ : Dict = 'こんにちは、、、、世界。こんばんは、、、、世界。'
lowerCAmelCase__ : int = tokenizer.encode(a )
lowerCAmelCase__ : Dict = tokenizer.decode(a )
self.assertEqual(a , a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCAmelCase__ : int = 'こんにちは、世界。'
lowerCAmelCase__ : Optional[int] = 'こんばんは、㔺界。😀'
lowerCAmelCase__ : Dict = 'こんにちは、世界。こんばんは、世界。😀'
lowerCAmelCase__ : int = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCAmelCase__ : Any = tokenizer.encode(a , prefix_text=a )
lowerCAmelCase__ : List[Any] = tokenizer.decode(a )
lowerCAmelCase__ : Optional[Any] = tokenizer.decode(a )
lowerCAmelCase__ : Tuple = tokenizer.decode(a )
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertEqual(a , a )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCAmelCase__ : Optional[Any] = 'こんにちは、世界。'
lowerCAmelCase__ : int = 'こんばんは、㔺界。😀'
lowerCAmelCase__ : Dict = len(tokenizer.encode(a ) ) - 2
lowerCAmelCase__ : Optional[Any] = len(tokenizer.encode(a ) ) - 2
lowerCAmelCase__ : int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase__ : List[str] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase__ : Optional[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase__ : str = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase__ : List[Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase__ : Any = tokenizer(a , prefix_text=a ).token_type_ids
self.assertListEqual(a , a )
self.assertListEqual(a , a )
self.assertListEqual(a , a )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCAmelCase__ : str = tokenizer.encode('あンいワ' )
lowerCAmelCase__ : Dict = tokenizer.encode('' , prefix_text='あンいワ' )
lowerCAmelCase__ : List[str] = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(a ) , tokenizer.decode(a ) )
self.assertEqual(tokenizer.decode(a ) , tokenizer.decode(a ) )
self.assertNotEqual(a , a )
self.assertNotEqual(a , a )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCAmelCase__ : List[str] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
lowerCAmelCase__ : Any = tokenizer(a , padding=a )
lowerCAmelCase__ : Union[str, Any] = tokenizer.batch_encode_plus(a , padding=a )
# fmt: off
lowerCAmelCase__ : Union[str, Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
lowerCAmelCase__ : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , a )
self.assertListEqual(x_token.token_type_ids , a )
self.assertListEqual(x_token.attention_mask , a )
self.assertListEqual(x_token_a.input_ids , a )
self.assertListEqual(x_token_a.token_type_ids , a )
self.assertListEqual(x_token_a.attention_mask , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
| 307
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCAmelCase__ : Union[str, Any] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements again
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
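# A small property check (illustrative sketch, following the driver's use of
# stooge_sort as the name of the top-level function): the result must agree
# with Python's built-in sorted() on a random input.
import random

sample = [random.randint(-100, 100) for _ in range(16)]
assert stooge_sort(list(sample)) == sorted(sample)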
| 307
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = image.size
lowerCAmelCase__ , lowerCAmelCase__ : Dict = (x - x % 32 for x in (w, h)) # round w and h down to the nearest multiple of 32
lowerCAmelCase__ : Any = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
lowerCAmelCase__ : Dict = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) / 255.0
lowerCAmelCase__ : Optional[int] = image[None].transpose(0 , 3 , 1 , 2 )
lowerCAmelCase__ : Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
return 2.0 * image - 1.0
class A__ ( __magic_name__ ):
def __init__( self : Optional[int] , a : VQModel , a : UNetaDModel , a : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=a , unet=a , scheduler=a )
@torch.no_grad()
def __call__( self : List[str] , a : Union[torch.Tensor, PIL.Image.Image] = None , a : Optional[int] = 1 , a : Optional[int] = 100 , a : Optional[float] = 0.0 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , PIL.Image.Image ):
lowerCAmelCase__ : Any = 1
elif isinstance(a , torch.Tensor ):
lowerCAmelCase__ : Union[str, Any] = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a )}''' )
if isinstance(a , PIL.Image.Image ):
lowerCAmelCase__ : List[str] = preprocess(a )
lowerCAmelCase__ , lowerCAmelCase__ : str = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCAmelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCAmelCase__ : int = next(self.unet.parameters() ).dtype
lowerCAmelCase__ : Tuple = randn_tensor(a , generator=a , device=self.device , dtype=a )
lowerCAmelCase__ : Tuple = image.to(device=self.device , dtype=a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(a , device=self.device )
lowerCAmelCase__ : Dict = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase__ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase__ : List[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase__ : Optional[int] = {}
if accepts_eta:
lowerCAmelCase__ : List[Any] = eta
for t in self.progress_bar(a ):
# concat latents and low resolution image in the channel dimension.
lowerCAmelCase__ : str = torch.cat([latents, image] , dim=1 )
lowerCAmelCase__ : List[Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowerCAmelCase__ : List[Any] = self.unet(a , a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : Optional[int] = self.scheduler.step(a , a , a , **a ).prev_sample
# decode the image latents with the VQVAE
lowerCAmelCase__ : List[Any] = self.vqvae.decode(a ).sample
lowerCAmelCase__ : Dict = torch.clamp(a , -1.0 , 1.0 )
lowerCAmelCase__ : Optional[int] = image / 2 + 0.5
lowerCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
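# A minimal usage sketch, assuming this pipeline is exported as
# LDMSuperResolutionPipeline as in upstream diffusers; the checkpoint is the
# public CompVis release and "low_res.png" is a hypothetical local file.
import PIL.Image
from diffusers import LDMSuperResolutionPipeline

sr_pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
upscaled = sr_pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")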
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
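# The lazy-module pattern above keeps `import transformers` cheap: each heavy
# submodule is materialized only on first attribute access. A usage sketch,
# assuming the upstream transformers package layout:
from transformers.models.speech_to_text import Speech2TextProcessor  # triggers the lazy load
# (The TYPE_CHECKING branch gives static type checkers the real imports
# without paying the runtime cost.)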
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
return 1 if input_a == input_a else 0
def lowerCAmelCase__ ( ) -> None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
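# Equivalently, XNOR is the complement of XOR, so a branch-free one-liner
# works for 0/1 inputs (illustrative sketch):
def xnor_via_xor(input_a: int, input_b: int) -> int:
    return 1 - (input_a ^ input_b)

assert all(xnor_via_xor(a, b) == (1 if a == b else 0) for a in (0, 1) for b in (0, 1))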
| 307
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
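# The utilities under test dispatch on the tensor type, so the same call covers
# NumPy, PyTorch, TensorFlow, and JAX inputs alike. A minimal sketch with NumPy
# only (assuming transformers.utils is importable, as in the tests above):
import numpy as np
from transformers.utils import reshape, transpose

x = np.random.randn(3, 4)
assert transpose(x).shape == (4, 3)
assert reshape(x, (12,)).shape == (12,)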
| 307
| 1
|
import re
import string
import numpy as np
import datasets
lowerCamelCase__ = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
lowerCamelCase__ = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
lowerCamelCase__ = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        """Declare the metric's input features and documentation."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return the exact-match rate after applying the requested normalizations."""
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both predictions and references
            # before any case/punctuation/number normalization is applied.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
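
# A short usage sketch (not part of the original file) of the public API
# re-exported by this __init__; the dataset id below is illustrative:
#
#     from datasets import load_dataset
#     ds = load_dataset("imdb", split="train")
#     print(ds[0])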
| 307
| 1