| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86 to 54.5k | int64 0 to 371 | stringlengths 87 to 49.2k | int64 0 to 349 | int64 0 to 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
__lowercase= np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ )
else:
__lowercase= np.full((len(lowercase__ ), sequence_length) , lowercase__ )
for i, tensor in enumerate(lowercase__ ):
if padding_side == "right":
if isinstance(lowercase__ , lowercase__ ):
__lowercase= tensor[:sequence_length]
else:
__lowercase= tensor[:sequence_length]
else:
if isinstance(lowercase__ , lowercase__ ):
__lowercase= tensor[:sequence_length]
else:
__lowercase= tensor[:sequence_length]
return out_tensor.tolist()
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= ord(lowercase__ )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
__lowercase= unicodedata.category(lowercase__ )
if cat.startswith('P' ):
return True
return False
@dataclass
class A ( A_ ):
UpperCamelCase_ : PreTrainedTokenizerBase
UpperCamelCase_ : Union[bool, str, PaddingStrategy] =True
UpperCamelCase_ : Optional[int] =None
UpperCamelCase_ : Optional[int] =None
UpperCamelCase_ : int =-100
UpperCamelCase_ : str ="pt"
def _A (self , lowerCAmelCase ):
import torch
__lowercase= 'label' if 'label' in features[0].keys() else 'labels'
__lowercase= [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__lowercase= self.tokenizer.pad(
lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
__lowercase= torch.tensor(batch['entity_ids'] ).shape[1]
__lowercase= self.tokenizer.padding_side
if padding_side == "right":
__lowercase= [
list(lowerCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase )) for label in labels
]
else:
__lowercase= [
[self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase )) + list(lowerCAmelCase ) for label in labels
]
__lowercase= [feature['ner_tags'] for feature in features]
__lowercase= padding_tensor(lowerCAmelCase , -1 , lowerCAmelCase , lowerCAmelCase )
__lowercase= [feature['original_entity_spans'] for feature in features]
__lowercase= padding_tensor(lowerCAmelCase , (-1, -1) , lowerCAmelCase , lowerCAmelCase )
__lowercase= {k: torch.tensor(lowerCAmelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
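The `padding_tensor` call used inside the collator above is defined at the top of the snippet, but the renamed version assigns every intermediate to `__lowercase`, which hides the slice assignments. A minimal readable sketch of what it appears to do (names such as `sequences` and `padding_value` are assumptions, not taken from the original):

```python
import numpy as np

def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Tuple padding values (e.g. (-1, -1) for entity spans) get an extra axis of size 2.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        kept = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(kept)] = kept      # content left, padding on the right
        else:
            out_tensor[i, sequence_length - len(kept) :] = kept  # padding on the left
    return out_tensor.tolist()

# e.g. padding_tensor([[1, 2], [3]], -1, "right", 3) -> [[1, 2, -1], [3, -1, -1]]
```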
| 368 |
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
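As a quick sanity check of the digit-sum routine above (a worked example, not part of the original):

```python
# 2 ** 15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
assert sum(int(digit) for digit in str(2 ** 15)) == 26
```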
| 304 | 0 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase = ''''''
lowerCAmelCase = ''''''
lowerCAmelCase = ''''''
lowerCAmelCase = ''''''
def _lowerCamelCase( lowercase__ ) -> None:
'''simple docstring'''
__lowercase= tweepy.OAuthHandler(lowercase__ , lowercase__ )
auth.set_access_token(lowercase__ , lowercase__ )
__lowercase= tweepy.API(lowercase__ )
# initialize a list to hold all the tweepy Tweets
__lowercase= []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowercase= api.user_timeline(screen_name=lowercase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowercase__ )
# save the id of the oldest tweet less one
__lowercase= alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase__ ) > 0:
print(F'getting tweets before {oldest}' )
# all subsequent requests use the max_id param to prevent duplicates
__lowercase= api.user_timeline(
screen_name=lowercase__ , count=2_0_0 , max_id=lowercase__ )
# save most recent tweets
alltweets.extend(lowercase__ )
# update the id of the oldest tweet less one
__lowercase= alltweets[-1].id - 1
print(F'...{len(lowercase__ )} tweets downloaded so far' )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowercase= [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'new_{screen_name}_tweets.csv' , 'w' ) as f:
__lowercase= csv.writer(lowercase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowercase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 369 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
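The `compute_metrics` helper above reduces class logits to an accuracy score over the evaluation set; a tiny standalone check of that computation (the example values are made up):

```python
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # hypothetical model outputs
label_ids = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)            # -> [1, 0, 1]
print({"acc": (preds == label_ids).mean()})  # {'acc': 0.666...}
```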
| 304 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': 5_1_2,
}
lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class A ( A_ ):
UpperCamelCase_ : Optional[Any] =VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[int] =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Any =RetriBertTokenizer
UpperCamelCase_ : Any =['''input_ids''', '''attention_mask''']
def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase ) != tokenize_chinese_chars
):
__lowercase= getattr(lowerCAmelCase , normalizer_state.pop('type' ) )
__lowercase= do_lower_case
__lowercase= strip_accents
__lowercase= tokenize_chinese_chars
__lowercase= normalizer_class(**lowerCAmelCase )
__lowercase= do_lower_case
def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
__lowercase= [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
__lowercase= [self.sep_token_id]
__lowercase= [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
__lowercase= self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
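Judging from the constants above, the renamed class `A` is the fast RetriBERT tokenizer. A hedged usage sketch (the class name `RetriBertTokenizerFast` is an assumption, and it requires a transformers version that still ships RetriBERT; the checkpoint id comes from the vocabulary map above):

```python
from transformers import RetriBertTokenizerFast  # assumed deobfuscated name of class A

tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
encoded = tokenizer("how many cats?", "two cats")
# Pair encoding follows [CLS] A [SEP] B [SEP]; token_type_ids mark the second segment
# with 1s, matching the two helper methods defined in the class above.
print(encoded["input_ids"])
print(encoded["token_type_ids"])
```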
| 370 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
"""simple docstring"""
@staticmethod
def _A (*lowerCAmelCase , **lowerCAmelCase ):
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
__lowercase= [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= vqa_pipeline(lowerCAmelCase , top_k=1 )
self.assertEqual(
lowerCAmelCase , [
[{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}],
[{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}],
] , )
@require_torch
def _A (self ):
__lowercase= pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
__lowercase= './tests/fixtures/tests_samples/COCO/000000039769.png'
__lowercase= 'How many cats are there?'
__lowercase= vqa_pipeline(image=lowerCAmelCase , question='How many cats are there?' , top_k=2 )
self.assertEqual(
lowerCAmelCase , [{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}, {'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}] )
__lowercase= vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
lowerCAmelCase , [{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}, {'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}] )
@slow
@require_torch
def _A (self ):
__lowercase= pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
__lowercase= './tests/fixtures/tests_samples/COCO/000000039769.png'
__lowercase= 'How many cats are there?'
__lowercase= vqa_pipeline(image=lowerCAmelCase , question=lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [{'score': 0.87_99, 'answer': '2'}, {'score': 0.2_96, 'answer': '1'}] )
__lowercase= vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [{'score': 0.87_99, 'answer': '2'}, {'score': 0.2_96, 'answer': '1'}] )
__lowercase= vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [[{'score': 0.87_99, 'answer': '2'}, {'score': 0.2_96, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _A (self ):
pass
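Distilled from the slow test above, a minimal end-to-end usage sketch (the model id, image path, and expected scores are taken from the test itself):

```python
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
# Per the test above, this is expected to resemble:
# [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]
print(answers)
```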
| 371 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''vocab.txt'''}
lowerCAmelCase = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
lowerCAmelCase = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= collections.OrderedDict()
with open(lowercase__ , 'r' , encoding='utf-8' ) as reader:
__lowercase= reader.readlines()
for index, token in enumerate(lowercase__ ):
__lowercase= token.rstrip('\n' )
__lowercase= index
return vocab
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase="<unk>" , lowerCAmelCase=2_0_0 ):
__lowercase= vocab
__lowercase= unk_token
__lowercase= max_input_chars_per_word
def _A (self , lowerCAmelCase ):
__lowercase= list(lowerCAmelCase )
if len(lowerCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
__lowercase= 0
__lowercase= []
while start < len(lowerCAmelCase ):
__lowercase= len(lowerCAmelCase )
__lowercase= None
while start < end:
__lowercase= ''.join(chars[start:end] )
if substr in self.vocab:
__lowercase= substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCAmelCase )
__lowercase= end
return sub_tokens
class A ( A_ ):
UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int =['''input_ids''', '''attention_mask''']
UpperCamelCase_ : Optional[int] =False
def __init__(self , lowerCAmelCase , lowerCAmelCase="<d>" , lowerCAmelCase="</d>" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<unk>" , lowerCAmelCase="</n>" , lowerCAmelCase="</_>" , lowerCAmelCase="left" , **lowerCAmelCase , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=lowerCAmelCase , eod_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , unk_token=lowerCAmelCase , line_token=lowerCAmelCase , space_token=lowerCAmelCase , padding_side=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= bod_token
__lowercase= eod_token
__lowercase= load_vocab(lowerCAmelCase )
__lowercase= self.encoder[space_token]
__lowercase= self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__lowercase= collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase : x[1] ) )
__lowercase= {v: k for k, v in self.encoder.items()}
__lowercase= WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _A (self ):
return self.encoder[self.bod_token]
@property
def _A (self ):
return self.encoder[self.eod_token]
@property
def _A (self ):
return self.encoder["\n"]
@property
def _A (self ):
return len(self.encoder )
def _A (self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _A (self , lowerCAmelCase ):
__lowercase= []
for x in jieba.cut(lowerCAmelCase , cut_all=lowerCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCAmelCase ) )
return output_tokens
def _A (self , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= [i for i in token_ids if i >= 0]
__lowercase= [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase ):
return token in self.encoder
def _A (self , lowerCAmelCase ):
return "".join(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) )
def _A (self , lowerCAmelCase ):
return self.decoder.get(lowerCAmelCase , self.unk_token )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if os.path.isdir(lowerCAmelCase ):
__lowercase= os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__lowercase= (filename_prefix + '-' if filename_prefix else '') + save_directory
__lowercase= 0
if " " in self.encoder:
__lowercase= self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
__lowercase= self.encoder['\n']
del self.encoder["\n"]
__lowercase= collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase : x[1] ) )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
__lowercase= token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase ))
return [1] + ([0] * len(lowerCAmelCase ))
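The nested while-loops in the `WordpieceTokenizer` class above implement greedy longest-match-first segmentation. A readable sketch of that algorithm in standalone form (names are reconstructed from the renamed code, so treat them as assumptions):

```python
def wordpiece_tokenize(token, vocab, unk_token="<unk>", max_input_chars_per_word=200):
    # Greedy longest-match-first: repeatedly take the longest prefix found in the vocab.
    chars = list(token)
    if len(chars) > max_input_chars_per_word:
        return [unk_token]
    start, sub_tokens = 0, []
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
            substr = "".join(chars[start:end])
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            sub_tokens.append(unk_token)  # no prefix matched: emit <unk> and move one char
            start += 1
        else:
            sub_tokens.append(cur_substr)
            start = end
    return sub_tokens

# e.g. wordpiece_tokenize("unhappy", {"un", "happy"}) -> ["un", "happy"]
```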
| 350 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A ( enum.Enum ):
UpperCamelCase_ : Dict =0
UpperCamelCase_ : Any =1
UpperCamelCase_ : List[str] =2
@add_end_docstrings(A_ )
class A ( A_ ):
UpperCamelCase_ : Tuple ='''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowercase= None
if self.model.config.prefix is not None:
__lowercase= self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowercase= self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params )
__lowercase= {**self._preprocess_params, **preprocess_params}
__lowercase= {**self._forward_params, **forward_params}
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
__lowercase= {}
if prefix is not None:
__lowercase= prefix
if prefix:
__lowercase= self.tokenizer(
lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
' [None, \'hole\']' )
__lowercase= handle_long_generation
preprocess_params.update(lowerCAmelCase )
__lowercase= generate_kwargs
__lowercase= {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowercase= ReturnType.TENSORS
if return_type is not None:
__lowercase= return_type
if clean_up_tokenization_spaces is not None:
__lowercase= clean_up_tokenization_spaces
if stop_sequence is not None:
__lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
if len(lowerCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowercase= stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase )
def __call__(self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prompt_text
if handle_long_generation == "hole":
__lowercase= inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowercase= generate_kwargs['max_new_tokens']
else:
__lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowercase= inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowercase= inputs['attention_mask'][:, -keep_length:]
return inputs
def _A (self , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= model_inputs['input_ids']
__lowercase= model_inputs.get('attention_mask' , lowerCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowercase= None
__lowercase= None
__lowercase= 1
else:
__lowercase= input_ids.shape[0]
__lowercase= model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowercase= generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowercase= 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowercase= 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase )
__lowercase= generated_sequence.shape[0]
if self.framework == "pt":
__lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ):
__lowercase= model_outputs['generated_sequence'][0]
__lowercase= model_outputs['input_ids']
__lowercase= model_outputs['prompt_text']
__lowercase= generated_sequence.numpy().tolist()
__lowercase= []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowercase= {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowercase= self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowercase= 0
else:
__lowercase= len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
__lowercase= prompt_text + text[prompt_length:]
else:
__lowercase= text[prompt_length:]
__lowercase= {'generated_text': all_text}
records.append(lowerCAmelCase )
return records
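A hedged usage sketch of the text-generation pipeline implemented above (the `gpt2` checkpoint is an assumption, not mentioned in the snippet; `return_full_text` is one of the options handled explicitly by the parameter-handling method above):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")  # assumption: any causal LM checkpoint
outputs = generator(
    "In 1991, the remains of",
    max_new_tokens=20,
    return_full_text=False,  # ReturnType.NEW_TEXT: only the newly generated continuation
)
print(outputs[0]["generated_text"])
```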
| 351 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 304 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 352 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class A ( A_ ):
def _A (self ):
__lowercase= SMALL_MODEL_IDENTIFIER
__lowercase= 'pt'
__lowercase= 'tf'
def _A (self , lowerCAmelCase ):
__lowercase= AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCAmelCase )
model_tf.save_pretrained(lowerCAmelCase )
def _A (self ):
__lowercase= 'mock_framework'
# Framework provided - return whatever the user provides
__lowercase= FeaturesManager.determine_framework(self.test_model , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase )
__lowercase= FeaturesManager.determine_framework(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase )
__lowercase= FeaturesManager.determine_framework(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def _A (self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase )
__lowercase= FeaturesManager.determine_framework(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase )
__lowercase= FeaturesManager.determine_framework(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCAmelCase ):
__lowercase= FeaturesManager.determine_framework(lowerCAmelCase )
def _A (self ):
__lowercase= MagicMock(return_value=lowerCAmelCase )
with patch('transformers.onnx.features.is_tf_available' , lowerCAmelCase ):
__lowercase= FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__lowercase= MagicMock(return_value=lowerCAmelCase )
with patch('transformers.onnx.features.is_torch_available' , lowerCAmelCase ):
__lowercase= FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
__lowercase= MagicMock(return_value=lowerCAmelCase )
__lowercase= MagicMock(return_value=lowerCAmelCase )
with patch('transformers.onnx.features.is_tf_available' , lowerCAmelCase ), patch(
'transformers.onnx.features.is_torch_available' , lowerCAmelCase ):
__lowercase= FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
__lowercase= MagicMock(return_value=lowerCAmelCase )
__lowercase= MagicMock(return_value=lowerCAmelCase )
with patch('transformers.onnx.features.is_tf_available' , lowerCAmelCase ), patch(
'transformers.onnx.features.is_torch_available' , lowerCAmelCase ):
with self.assertRaises(lowerCAmelCase ):
__lowercase= FeaturesManager.determine_framework(self.test_model )
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
from math import factorial
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= real
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= [1] * rank
else:
__lowercase= rank
def __repr__(self ):
return (
f'{self.real}+'
f'{"+".join(str(lowerCAmelCase )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def _A (self ):
__lowercase= self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCAmelCase )
def __add__(self , lowerCAmelCase ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
return Dual(self.real + other , self.duals )
__lowercase= self.duals.copy()
__lowercase= other.duals.copy()
if len(lowerCAmelCase ) > len(lowerCAmelCase ):
o_dual.extend([1] * (len(lowerCAmelCase ) - len(lowerCAmelCase )) )
elif len(lowerCAmelCase ) < len(lowerCAmelCase ):
s_dual.extend([1] * (len(lowerCAmelCase ) - len(lowerCAmelCase )) )
__lowercase= []
for i in range(len(lowerCAmelCase ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCAmelCase )
UpperCamelCase_ : int =__add__
def __sub__(self , lowerCAmelCase ):
return self + other * -1
def __mul__(self , lowerCAmelCase ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCAmelCase )
__lowercase= [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCAmelCase )
UpperCamelCase_ : Tuple =__mul__
def __truediv__(self , lowerCAmelCase ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCAmelCase )
raise ValueError
def __floordiv__(self , lowerCAmelCase ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCAmelCase )
raise ValueError
def __pow__(self , lowerCAmelCase ):
if n < 0 or isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
__lowercase= self
for _ in range(n - 1 ):
x *= self
return x
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
if not callable(lowercase__ ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(lowercase__ , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('differentiate() requires an int as input for order' )
__lowercase= Dual(lowercase__ , 1 )
__lowercase= func(lowercase__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return lowercase__**2 * lowercase__**4
print(differentiate(f, 9, 2))
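# For reference (hand-checked, independent of running the snippet): the demo
# above takes f(y) = y**2 * y**4 = y**6 and asks for its second derivative at
# y = 9; analytically d^2/dy^2 (y**6) = 30 * y**4, so the expected value is
# 30 * 9**4 = 196830.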
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
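# Sanity reference for the demo below (hand computation, independent of the
# code): the exact integral of x**3 + x**2 over [-5, 5] is 250/3 ≈ 83.33,
# because the odd x**3 term integrates to zero and the x**2 term contributes
# 2 * 5**3 / 3. The trapezoidal estimates printed below should approach this
# value as the step count grows.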
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
| 304
| 0
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="None" , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= relative_attention
__lowercase= position_biased_input
__lowercase= pos_att_type
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _A (self , lowerCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )[0]
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )[0]
__lowercase= model(lowerCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DebertaVaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DebertaVaForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : List[Any] =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] =(
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Union[str, Any] =True
UpperCamelCase_ : int =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Dict =False
UpperCamelCase_ : List[str] =False
def _A (self ):
__lowercase= DebertaVaModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= DebertaVaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def _A (self ):
pass
@slow
def _A (self ):
__lowercase= DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__lowercase= torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
# compare the actual values for a slice.
__lowercase= torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _lowerCamelCase( lowercase__ , lowercase__=1_0 ) -> Any:
'''simple docstring'''
__lowercase= []
for _ in range(lowercase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
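# (The helper above is meant to step the scheduler for the requested number of
# steps, recording the learning rate before each step; the variant below does
# the same but also saves and reloads the scheduler state dict halfway through,
# to exercise checkpoint serialisation of the schedule.)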
def _lowerCamelCase( lowercase__ , lowercase__=1_0 ) -> Any:
'''simple docstring'''
__lowercase= []
for step in range(lowercase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase= os.path.join(lowercase__ , 'schedule.bin' )
torch.save(scheduler.state_dict() , lowercase__ )
__lowercase= torch.load(lowercase__ )
scheduler.load_state_dict(lowercase__ )
return lrs
@require_torch
class A ( unittest.TestCase ):
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for a, b in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertAlmostEqual(lowerCAmelCase , lowerCAmelCase , delta=lowerCAmelCase )
def _A (self ):
__lowercase= torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase )
__lowercase= torch.tensor([0.4, 0.2, -0.5] )
__lowercase= nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowercase= AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
__lowercase= criterion(lowerCAmelCase , lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def _A (self ):
__lowercase= torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase )
__lowercase= torch.tensor([0.4, 0.2, -0.5] )
__lowercase= nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowercase= Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase , weight_decay=0.0 , relative_step=lowerCAmelCase , scale_parameter=lowerCAmelCase , warmup_init=lowerCAmelCase , )
for _ in range(1_0_0_0 ):
__lowercase= criterion(lowerCAmelCase , lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class A ( unittest.TestCase ):
UpperCamelCase_ : Optional[Any] =nn.Linear(50 , 50 ) if is_torch_available() else None
UpperCamelCase_ : Union[str, Any] =AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
UpperCamelCase_ : str =10
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ):
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for a, b in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertAlmostEqual(lowerCAmelCase , lowerCAmelCase , delta=lowerCAmelCase , msg=lowerCAmelCase )
def _A (self ):
__lowercase= {'num_warmup_steps': 2, 'num_training_steps': 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__lowercase= {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
__lowercase, __lowercase= data
__lowercase= scheduler_func(self.optimizer , **lowerCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__lowercase= unwrap_schedule(lowerCAmelCase , self.num_steps )
self.assertListAlmostEqual(
lowerCAmelCase , lowerCAmelCase , tol=1E-2 , msg=f'failed for {scheduler_func} in normal scheduler' , )
__lowercase= scheduler_func(self.optimizer , **lowerCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase ) # wrap to test picklability of the schedule
__lowercase= unwrap_and_save_reload_schedule(lowerCAmelCase , self.num_steps )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase , msg=f'failed for {scheduler_func} in save and reload' )
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= fn
def __call__(self , *lowerCAmelCase , **lowerCAmelCase ):
return self.fn(*lowerCAmelCase , **lowerCAmelCase )
@classmethod
def _A (self , lowerCAmelCase ):
lowerCAmelCase.lr_lambdas= list(map(self , lowerCAmelCase.lr_lambdas ) )
| 356
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase = logging.getLogger(__name__)
class A ( A_ ):
UpperCamelCase_ : Tuple ='''masked_bert'''
def __init__(self , lowerCAmelCase=3_0_5_2_2 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase="topK" , lowerCAmelCase="constant" , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= pruning_method
__lowercase= mask_init
__lowercase= mask_scale
| 357
|
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return np.array_equal(lowercase__ , lowercase__.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
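# For reference, the quantity computed above is the Rayleigh quotient
#     R(M, v) = (v* M v) / (v* v),
# where v* is the conjugate transpose of v; for a Hermitian matrix M it is
# always real and lies between the smallest and largest eigenvalues of M.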
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 304
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A ( A_ ):
UpperCamelCase_ : str ='''unispeech'''
def __init__(self , lowerCAmelCase=3_2 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-5 , lowerCAmelCase="group" , lowerCAmelCase="gelu" , lowerCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , lowerCAmelCase=False , lowerCAmelCase=1_2_8 , lowerCAmelCase=1_6 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=0.05 , lowerCAmelCase=1_0 , lowerCAmelCase=2 , lowerCAmelCase=0.0 , lowerCAmelCase=1_0 , lowerCAmelCase=0 , lowerCAmelCase=3_2_0 , lowerCAmelCase=2 , lowerCAmelCase=0.1 , lowerCAmelCase=1_0_0 , lowerCAmelCase=2_5_6 , lowerCAmelCase=2_5_6 , lowerCAmelCase=0.1 , lowerCAmelCase="mean" , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2_5_6 , lowerCAmelCase=8_0 , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=0.5 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase )
__lowercase= hidden_size
__lowercase= feat_extract_norm
__lowercase= feat_extract_activation
__lowercase= list(lowerCAmelCase )
__lowercase= list(lowerCAmelCase )
__lowercase= list(lowerCAmelCase )
__lowercase= conv_bias
__lowercase= num_conv_pos_embeddings
__lowercase= num_conv_pos_embedding_groups
__lowercase= len(self.conv_dim )
__lowercase= num_hidden_layers
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= num_attention_heads
__lowercase= hidden_dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= feat_proj_dropout
__lowercase= final_dropout
__lowercase= layerdrop
__lowercase= layer_norm_eps
__lowercase= initializer_range
__lowercase= num_ctc_classes
__lowercase= vocab_size
__lowercase= do_stable_layer_norm
__lowercase= use_weighted_layer_sum
__lowercase= classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase= apply_spec_augment
__lowercase= mask_time_prob
__lowercase= mask_time_length
__lowercase= mask_time_min_masks
__lowercase= mask_feature_prob
__lowercase= mask_feature_length
__lowercase= mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowercase= num_codevectors_per_group
__lowercase= num_codevector_groups
__lowercase= contrastive_logits_temperature
__lowercase= feat_quantizer_dropout
__lowercase= num_negatives
__lowercase= codevector_dim
__lowercase= proj_codevector_dim
__lowercase= diversity_loss_weight
# ctc loss
__lowercase= ctc_loss_reduction
__lowercase= ctc_zero_infinity
# pretraining loss
__lowercase= replace_prob
@property
def _A (self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
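# Worked example for the property above (hand computation from the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2)): the strides multiply to 5 * 2**6 = 320,
# i.e. the convolutional feature encoder emits one frame per 320 raw samples,
# which corresponds to 20 ms of audio at a 16 kHz sampling rate.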
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
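# Reading of the normalisation intended above (note the listing does not rebind
# `log_spec` between the intermediate steps): the spectrogram is clamped to an
# 80 dB dynamic range, the last frame is dropped, and the shift by -20 followed
# by division by 40, clipping to [-2, 0] and adding 1 squeezes the features
# into [-1, 1].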
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 304
| 0
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
lowerCAmelCase = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def _lowerCamelCase( lowercase__=True ) -> List[Any]:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A_ ) )
class A ( A_ ):
UpperCamelCase_ : Optional[int] =None
UpperCamelCase_ : str =None
def _A (self , lowerCAmelCase , lowerCAmelCase ):
with TemporaryDirectory() as tmp_dir:
__lowercase= dataset_module_factory(lowerCAmelCase , cache_dir=lowerCAmelCase )
__lowercase= import_main_class(dataset_module.module_path , dataset=lowerCAmelCase )
__lowercase= builder_cls(
cache_dir=lowerCAmelCase , config_name=lowerCAmelCase , hash=dataset_module.hash , )
__lowercase= '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
__lowercase= cached_path(lowerCAmelCase , cache_dir=lowerCAmelCase )
self.assertTrue(os.path.exists(lowerCAmelCase ) )
@pytest.mark.integration
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
__lowercase= dataset_module_factory('wikipedia' , cache_dir=lowercase__ )
__lowercase= import_main_class(dataset_module.module_path )
__lowercase= builder_cls(
cache_dir=lowercase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__lowercase= None
builder_instance.download_and_prepare()
__lowercase= builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
__lowercase= dataset_module_factory('wikipedia' , cache_dir=lowercase__ )
__lowercase= import_main_class(dataset_module.module_path , dataset=lowercase__ )
__lowercase= builder_cls(
cache_dir=lowercase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
__lowercase= builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowercase__ , lowercase__ )
assert "train" in ds
assert isinstance(ds['train'] , lowercase__ )
assert next(iter(ds['train'] ) )
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
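# A sketch of how this multi-process test is typically launched (the script filename is an assumption):
#
#   accelerate launch --num_processes 2 test_operations.py
#
# With two processes, create_tensor() yields [1., 2.] on rank 0 and [3., 4.] on rank 1, so gather()
# returns [1., 2., 3., 4.] and reduce(..., "sum") yields [4., 6.], matching the assertions above.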
| 304
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= SwinConfig()
__lowercase= swin_name.split('_' )
__lowercase= name_split[1]
__lowercase= int(name_split[4] )
__lowercase= int(name_split[3][-1] )
if model_size == "tiny":
__lowercase= 9_6
__lowercase= (2, 2, 6, 2)
__lowercase= (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase= 9_6
__lowercase= (2, 2, 1_8, 2)
__lowercase= (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase= 1_2_8
__lowercase= (2, 2, 1_8, 2)
__lowercase= (4, 8, 1_6, 3_2)
else:
__lowercase= 1_9_2
__lowercase= (2, 2, 1_8, 2)
__lowercase= (6, 1_2, 2_4, 4_8)
if "in22k" in swin_name:
__lowercase= 2_1_8_4_1
else:
__lowercase= 1_0_0_0
__lowercase= 'huggingface/label-files'
__lowercase= 'imagenet-1k-id2label.json'
__lowercase= json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
__lowercase= {int(lowercase__ ): v for k, v in idalabel.items()}
__lowercase= idalabel
__lowercase= {v: k for k, v in idalabel.items()}
__lowercase= img_size
__lowercase= num_classes
__lowercase= embed_dim
__lowercase= depths
__lowercase= num_heads
__lowercase= window_size
return config
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
if "patch_embed.proj" in name:
__lowercase= name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowercase= name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowercase= 'encoder.' + name
if "attn.proj" in name:
__lowercase= name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowercase= name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowercase= name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowercase= name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowercase= name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowercase= name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
__lowercase= 'layernorm.weight'
if name == "norm.bias":
__lowercase= 'layernorm.bias'
if "head" in name:
__lowercase= name.replace('head' , 'classifier' )
else:
__lowercase= 'swin.' + name
return name
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowercase= orig_state_dict.pop(lowercase__ )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase= key.split('.' )
__lowercase= int(key_split[1] )
__lowercase= int(key_split[3] )
__lowercase= model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase= val[:dim, :]
__lowercase= val[
dim : dim * 2, :
]
__lowercase= val[-dim:, :]
else:
__lowercase= val[
:dim
]
__lowercase= val[
dim : dim * 2
]
__lowercase= val[
-dim:
]
else:
__lowercase= val
return orig_state_dict
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
__lowercase= timm.create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
__lowercase= get_swin_config(lowercase__ )
__lowercase= SwinForImageClassification(lowercase__ )
model.eval()
__lowercase= convert_state_dict(timm_model.state_dict() , lowercase__ )
model.load_state_dict(lowercase__ )
__lowercase= 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase= AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
__lowercase= image_processor(images=lowercase__ , return_tensors='pt' )
__lowercase= timm_model(inputs['pixel_values'] )
__lowercase= model(**lowercase__ ).logits
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
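# Example invocation of this conversion script (the filename is an assumption; adjust it to wherever
# this file actually lives):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224
#
# The timm checkpoint is downloaded, its weights are remapped with convert_state_dict(), both models
# are compared on a COCO test image, and the HF model plus image processor are saved to the folder.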
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires the indices to be on the same device as the indexed tensor, so we keep them on CPU, as is also the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of the SDE
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
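# A rough sketch of the predictor-corrector sampling loop this scheduler is designed for. The method
# and attribute names below follow the public diffusers ScoreSdeVeScheduler API and are assumed to
# correspond to the obfuscated `_A` methods above; treat this as an outline, not an exact interface:
#
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(shape, generator=generator) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, t).sample
#           sample = scheduler.step_correct(model_output, sample, generator=generator).prev_sample
#       model_output = model(sample, t).sample
#       output = scheduler.step_pred(model_output, t, sample, generator=generator)
#       sample, sample_mean = output.prev_sample, output.prev_sample_mean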
| 304
| 0
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=3.6 ):
__lowercase= tokenizer
__lowercase= tokenizer.bos_token_id
__lowercase= dataset
__lowercase= seq_length
__lowercase= seq_length * chars_per_token * num_of_sequences
def __iter__(self ):
__lowercase= iter(self.dataset )
__lowercase= True
while more_examples:
__lowercase, __lowercase= [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCAmelCase )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
__lowercase= False
break
__lowercase= tokenizer(lowerCAmelCase , truncation=lowerCAmelCase )['input_ids']
__lowercase= []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCAmelCase ) , self.seq_length ):
__lowercase= all_token_ids[i : i + self.seq_length]
if len(lowerCAmelCase ) == self.seq_length:
yield torch.tensor(lowerCAmelCase )
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= {'streaming': True}
__lowercase= load_dataset(args.dataset_name , split='train' , **lowercase__ )
__lowercase= ConstantLengthDataset(lowercase__ , lowercase__ , seq_length=args.seq_length )
__lowercase= DataLoader(lowercase__ , batch_size=args.batch_size )
return eval_dataloader
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
model.eval()
__lowercase= []
for step, batch in enumerate(lowercase__ ):
with torch.no_grad():
__lowercase= model(lowercase__ , labels=lowercase__ )
__lowercase= outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__lowercase= torch.mean(torch.cat(lowercase__ ) )
try:
__lowercase= torch.exp(lowercase__ )
except OverflowError:
__lowercase= float('inf' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase = Accelerator()
# Parse configuration
lowerCAmelCase = HfArgumentParser(EvaluationArguments)
lowerCAmelCase = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase ,lowerCAmelCase = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
lowerCAmelCase ,lowerCAmelCase = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
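# Note that perplexity here is simply exp(mean cross-entropy loss) over the streamed evaluation
# split, with `inf` reported when the exponential overflows. A typical launch (the script name and
# checkpoint are assumptions):
#
#   accelerate launch eval_perplexity.py --model_ckpt codeparrot/codeparrot-small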
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 0
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363
|
import math
from datetime import datetime, timedelta
def _lowerCamelCase( lowercase__ ) -> datetime:
'''simple docstring'''
__lowercase= year % 1_9
__lowercase= year % 4
__lowercase= year % 7
__lowercase= math.floor(year / 1_0_0 )
__lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__lowercase= leap_day_inhibits / 4
__lowercase= (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__lowercase= (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_8 )
else:
return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
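# A single-year usage sketch (the computus function above is defined under an obfuscated name, but
# the loop calls it as gauss_easter):
#
#   gauss_easter(2024)  # expected to return datetime(2024, 3, 31), Western Easter Sunday 2024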
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A ( A_ ):
UpperCamelCase_ : Any ='''trocr'''
UpperCamelCase_ : int =['''past_key_values''']
UpperCamelCase_ : str ={
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= d_model
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= activation_function
__lowercase= max_position_embeddings
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= init_std
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= scale_embedding
__lowercase= use_learned_position_embeddings
__lowercase= layernorm_embedding
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the numbers of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 304
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class A ( unittest.TestCase ):
"""simple docstring"""
def __init__(self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=1_8 , lowerCAmelCase=3_0 , lowerCAmelCase=4_0_0 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=False , ):
__lowercase= size if size is not None else {'height': 2_0, 'width': 2_0}
__lowercase= crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
__lowercase= parent
__lowercase= batch_size
__lowercase= num_channels
__lowercase= image_size
__lowercase= min_resolution
__lowercase= max_resolution
__lowercase= do_resize
__lowercase= size
__lowercase= do_center_crop
__lowercase= crop_size
__lowercase= do_normalize
__lowercase= image_mean
__lowercase= image_std
__lowercase= do_reduce_labels
def _A (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__lowercase= Image.open(dataset[0]['file'] )
__lowercase= Image.open(dataset[1]['file'] )
return image, map
def _lowerCamelCase( ) -> Dict:
'''simple docstring'''
__lowercase= load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__lowercase= Image.open(ds[0]['file'] )
__lowercase= Image.open(ds[1]['file'] )
__lowercase= Image.open(ds[2]['file'] )
__lowercase= Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class A ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =BeitImageProcessor if is_vision_available() else None
def _A (self ):
__lowercase= BeitImageProcessingTester(self )
@property
def _A (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A (self ):
__lowercase= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'image_std' ) )
def _A (self ):
__lowercase= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 2_0, 'width': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase )
__lowercase= self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=lowerCAmelCase )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase )
def _A (self ):
pass
def _A (self ):
# Initialize image_processing
__lowercase= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
__lowercase= image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__lowercase= image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _A (self ):
# Initialize image_processing
__lowercase= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
__lowercase= image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__lowercase= image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _A (self ):
# Initialize image_processing
__lowercase= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
__lowercase= image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__lowercase= image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _A (self ):
# Initialize image_processing
__lowercase= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
__lowercase= []
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__lowercase= image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched
__lowercase= image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
__lowercase, __lowercase= prepare_semantic_single_inputs()
__lowercase= image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched input (PIL images)
__lowercase, __lowercase= prepare_semantic_batch_inputs()
__lowercase= image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
def _A (self ):
# Initialize image_processing
__lowercase= self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__lowercase, __lowercase= prepare_semantic_single_inputs()
__lowercase= image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 1_5_0 )
__lowercase= True
__lowercase= image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
| 365
|
from math import factorial, radians
def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
'''simple docstring'''
__lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowercase= radians(lowercase__ )
__lowercase= angle_in_radians
__lowercase= 3
__lowercase= -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
__lowercase= -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
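# Quick sanity checks for the Maclaurin approximation above (sketched as comments because the
# function is defined under an obfuscated name): with the default 18 terms and 10-digit rounding,
# an input of 30 degrees should give 0.5 and 90 degrees should give 1.0, i.e. the exact values of
# sin at those angles.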
| 304
| 0
|
import numpy as np
from transformers import Pipeline
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= np.max(lowercase__ , axis=-1 , keepdims=lowercase__ )
__lowercase= np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase__ )
class A ( A_ ):
def _A (self , **lowerCAmelCase ):
__lowercase= {}
if "second_text" in kwargs:
__lowercase= kwargs['second_text']
return preprocess_kwargs, {}, {}
def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
return self.tokenizer(lowerCAmelCase , text_pair=lowerCAmelCase , return_tensors=self.framework )
def _A (self , lowerCAmelCase ):
return self.model(**lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= model_outputs.logits[0].numpy()
__lowercase= softmax(lowerCAmelCase )
__lowercase= np.argmax(lowerCAmelCase )
__lowercase= self.model.config.idalabel[best_class]
__lowercase= probabilities[best_class].item()
__lowercase= logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCAmelCase = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
lowerCAmelCase = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will also match any TF or Flax model, so it needs to be in an else branch after the two previous regexes.
lowerCAmelCase = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCAmelCase = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
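# Split a camel-cased identifier into its words, keeping acronyms intact (e.g. "TFBertModel" -> ["TF", "Bert", "Model"]).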
__lowercase= re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowercase__ )
return [m.group(0 ) for m in matches]
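# Build a table listing, for every model type, whether it has a PyTorch/TF/Flax implementation and which default processing class it uses.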
def _lowerCamelCase( ) -> List[Any]:
'''simple docstring'''
__lowercase= transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__lowercase= {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__lowercase= collections.defaultdict(lowercase__ )
__lowercase= collections.defaultdict(lowercase__ )
__lowercase= collections.defaultdict(lowercase__ )
# Let's look through all transformers objects (once) and find out if models are supported by a given backend.
for attr_name in dir(lowercase__ ):
__lowercase= None
if _re_tf_models.match(lowercase__ ) is not None:
__lowercase= tf_models
__lowercase= _re_tf_models.match(lowercase__ ).groups()[0]
elif _re_flax_models.match(lowercase__ ) is not None:
__lowercase= flax_models
__lowercase= _re_flax_models.match(lowercase__ ).groups()[0]
elif _re_pt_models.match(lowercase__ ) is not None:
__lowercase= pt_models
__lowercase= _re_pt_models.match(lowercase__ ).groups()[0]
if lookup_dict is not None:
while len(lowercase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__lowercase= True
break
# Try again after removing the last word in the name
__lowercase= ''.join(camel_case_split(lowercase__ )[:-1] )
__lowercase= set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__lowercase= list(lowercase__ )
all_models.sort()
__lowercase= {'model_type': all_models}
__lowercase= [pt_models[t] for t in all_models]
__lowercase= [tf_models[t] for t in all_models]
__lowercase= [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure every model type gets a default processing class
__lowercase= {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__lowercase= 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__lowercase= 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__lowercase= 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__lowercase= 'AutoTokenizer'
__lowercase= [processors[t] for t in all_models]
return pd.DataFrame(lowercase__ )
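# Update the model class -> (pipeline tag, auto class) table with every model name found in the PT/TF/Flax auto-modules.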
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__lowercase= [model_mapping, F'TF_{model_mapping}', F'FLAX_{model_mapping}']
__lowercase= [auto_class, F'TF_{auto_class}', F'Flax_{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase__ , lowercase__ , lowercase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase__ , lowercase__ ):
continue
# First extract all model_names
__lowercase= []
for name in getattr(lowercase__ , lowercase__ ).values():
if isinstance(lowercase__ , lowercase__ ):
model_names.append(lowercase__ )
else:
model_names.extend(list(lowercase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
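# Regenerate the frameworks and pipeline-tags tables and upload them to the huggingface/transformers-metadata dataset repo.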
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= get_frameworks_table()
__lowercase= Dataset.from_pandas(lowercase__ )
__lowercase= hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowercase__ )
__lowercase= Dataset.from_json(lowercase__ )
__lowercase= {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowercase__ ) )
}
__lowercase= update_pipeline_and_auto_class_table(lowercase__ )
# Sort the model classes so that nondeterministic ordering does not create false update commits.
__lowercase= sorted(table.keys() )
__lowercase= pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
__lowercase= Dataset.from_pandas(lowercase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase__ , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowercase__ , 'pipeline_tags.json' ) )
if commit_sha is not None:
__lowercase= (
F'Update with commit {commit_sha}\n\nSee: '
F'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
__lowercase= 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowercase__ , repo_type='dataset' , token=lowercase__ , commit_message=lowercase__ , )
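# Sanity check: every pipeline task supported by transformers must have an entry in PIPELINE_TAGS_AND_AUTO_MODELS above.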
def _lowerCamelCase( ) -> Any:
'''simple docstring'''
__lowercase= {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__lowercase= transformers_module.pipelines.SUPPORTED_TASKS
__lowercase= []
for key in pipeline_tasks:
if key not in in_table:
__lowercase= pipeline_tasks[key]['pt']
if isinstance(lowercase__ , (list, tuple) ):
__lowercase= model[0]
__lowercase= model.__name__
if model not in in_table.values():
missing.append(lowercase__ )
if len(lowercase__ ) > 0:
__lowercase= ', '.join(lowercase__ )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
F'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
lowerCAmelCase = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 367
|
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=3 , lowerCAmelCase=3_2 , lowerCAmelCase=3 , lowerCAmelCase=1_0 , lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase=[1, 1, 2, 1] , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=3 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= num_channels
__lowercase= embeddings_size
__lowercase= hidden_sizes
__lowercase= depths
__lowercase= is_training
__lowercase= use_labels
__lowercase= hidden_act
__lowercase= num_labels
__lowercase= scope
__lowercase= len(lowerCAmelCase )
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= TFRegNetModel(config=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , training=lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= TFRegNetForImageClassification(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Dict =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase_ : Union[str, Any] =(
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : int =False
UpperCamelCase_ : Dict =False
def _A (self ):
__lowercase= TFRegNetModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def _A (self ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def _A (self ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) , training=lowerCAmelCase )
__lowercase= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase= self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
__lowercase= ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowercase= layer_type
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase={} ):
__lowercase= model(lowerCAmelCase , return_dict=lowerCAmelCase , **lowerCAmelCase )
__lowercase= model(lowerCAmelCase , return_dict=lowerCAmelCase , **lowerCAmelCase ).to_tuple()
def recursive_check(lowerCAmelCase , lowerCAmelCase ):
if isinstance(lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase , lowerCAmelCase ):
recursive_check(lowerCAmelCase , lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase , lowerCAmelCase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(lowerCAmelCase , lowerCAmelCase )
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {'output_hidden_states': True} )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {'output_hidden_states': True} )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= TFRegNetModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _A (self ):
__lowercase= TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='tf' )
# forward pass
__lowercase= model(**lowerCAmelCase , training=lowerCAmelCase )
# verify the logits
__lowercase= tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 )
| 368
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
| 304
| 0
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
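# Each step maps the fraction p/q to (p + 2q) / (p + q), the next convergent of the continued fraction for sqrt(2);
# count the expansions whose numerator has more digits than the denominator.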
__lowercase, __lowercase= 1, 1
__lowercase= []
for i in range(1 , n + 1 ):
__lowercase= prev_numerator + 2 * prev_denominator
__lowercase= prev_numerator + prev_denominator
if len(str(lowercase__ ) ) > len(str(lowercase__ ) ):
result.append(lowercase__ )
__lowercase= numerator
__lowercase= denominator
return len(lowercase__ )
if __name__ == "__main__":
print(F'{solution() = }')
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
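# Load the CSV files with the datasets library, tokenize every split, and wrap each one in a tf.data.Dataset generator.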
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
| 304
| 0
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
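# Download the BLIP demo image and preprocess it (resize, tensorize, normalize) for the vision encoder.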
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
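# Map the original BLIP state-dict keys to the names used by the Hugging Face implementation.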
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
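# standard conv output size per stage: floor((input + 2 * padding - kernel) / stride) + 1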
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 0
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A :
"""simple docstring"""
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=0.2 , lowerCAmelCase=0.2 ):
__lowercase= bp_numa
__lowercase= bp_numa
__lowercase= bp_numa
__lowercase= conva_get[:2]
__lowercase= conva_get[2]
__lowercase= size_pa
__lowercase= rate_w
__lowercase= rate_t
__lowercase= [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__lowercase= np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowercase= np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowercase= -2 * np.random.rand(self.conva[1] ) + 1
__lowercase= -2 * np.random.rand(self.num_bpa ) + 1
__lowercase= -2 * np.random.rand(self.num_bpa ) + 1
def _A (self , lowerCAmelCase ):
# save model dict with pickle
__lowercase= {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(lowerCAmelCase , 'wb' ) as f:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
print(f'Model saved: {save_path}' )
@classmethod
def _A (cls , lowerCAmelCase ):
# read saved model
with open(lowerCAmelCase , 'rb' ) as f:
__lowercase= pickle.load(lowerCAmelCase ) # noqa: S301
__lowercase= model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
__lowercase= model_dic.get('size_pooling1' )
__lowercase= model_dic.get('num_bp1' )
__lowercase= model_dic.get('num_bp2' )
__lowercase= model_dic.get('num_bp3' )
__lowercase= model_dic.get('rate_weight' )
__lowercase= model_dic.get('rate_thre' )
# create model instance
__lowercase= CNN(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# modify model parameter
__lowercase= model_dic.get('w_conv1' )
__lowercase= model_dic.get('wkj' )
__lowercase= model_dic.get('vji' )
__lowercase= model_dic.get('thre_conv1' )
__lowercase= model_dic.get('thre_bp2' )
__lowercase= model_dic.get('thre_bp3' )
return conv_ins
def _A (self , lowerCAmelCase ):
return 1 / (1 + np.exp(-1 * x ))
def _A (self , lowerCAmelCase ):
return round(lowerCAmelCase , 3 )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# convolution process
__lowercase= convs[0]
__lowercase= convs[1]
__lowercase= np.shape(lowerCAmelCase )[0]
# get the data slice of original image data, data_focus
__lowercase= []
for i_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase ):
__lowercase= data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCAmelCase )
# calculate the feature map of every single kernel, and save it as a list of matrices
__lowercase= []
__lowercase= int((size_data - size_conv) / conv_step + 1 )
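# side length of the square feature map produced by one kernel: (input - kernel) / stride + 1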
for i_map in range(lowerCAmelCase ):
__lowercase= []
for i_focus in range(len(lowerCAmelCase ) ):
__lowercase= (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCAmelCase ) )
__lowercase= np.asmatrix(lowerCAmelCase ).reshape(
lowerCAmelCase , lowerCAmelCase )
data_featuremap.append(lowerCAmelCase )
# expanding the data slice to one dimension
__lowercase= []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCAmelCase ) )
__lowercase= np.asarray(lowerCAmelCase )
return focus_list, data_featuremap
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="average_pool" ):
# pooling process
__lowercase= len(featuremaps[0] )
__lowercase= int(size_map / size_pooling )
__lowercase= []
for i_map in range(len(lowerCAmelCase ) ):
__lowercase= featuremaps[i_map]
__lowercase= []
for i_focus in range(0 , lowerCAmelCase , lowerCAmelCase ):
for j_focus in range(0 , lowerCAmelCase , lowerCAmelCase ):
__lowercase= feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCAmelCase ) )
__lowercase= np.asmatrix(lowerCAmelCase ).reshape(lowerCAmelCase , lowerCAmelCase )
featuremap_pooled.append(lowerCAmelCase )
return featuremap_pooled
def _A (self , lowerCAmelCase ):
# expanding three-dimensional data to a one-dimensional list
__lowercase= []
for i in range(len(lowerCAmelCase ) ):
__lowercase= np.shape(data[i] )
__lowercase= data[i].reshape(1 , shapes[0] * shapes[1] )
__lowercase= data_listed.getA().tolist()[0]
data_expanded.extend(lowerCAmelCase )
__lowercase= np.asarray(lowerCAmelCase )
return data_expanded
def _A (self , lowerCAmelCase ):
# expanding a matrix to a one-dimensional list
__lowercase= np.asarray(lowerCAmelCase )
__lowercase= np.shape(lowerCAmelCase )
__lowercase= data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= []
__lowercase= 0
for i_map in range(lowerCAmelCase ):
__lowercase= np.ones((size_map, size_map) )
for i in range(0 , lowerCAmelCase , lowerCAmelCase ):
for j in range(0 , lowerCAmelCase , lowerCAmelCase ):
__lowercase= pd_pool[
i_pool
]
__lowercase= i_pool + 1
__lowercase= np.multiply(
lowerCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCAmelCase )
return pd_all
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=bool ):
# model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(lowerCAmelCase )) )
print((' - - Shape: Teach_Data ', np.shape(lowerCAmelCase )) )
__lowercase= 0
__lowercase= []
__lowercase= 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
__lowercase= 0
print(f'-------------Learning Time {rp}--------------' )
for p in range(len(lowerCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
__lowercase= np.asmatrix(datas_train[p] )
__lowercase= np.asarray(datas_teach[p] )
__lowercase, __lowercase= self.convolute(
lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase= self.pooling(lowerCAmelCase , self.size_poolinga )
__lowercase= np.shape(lowerCAmelCase )
__lowercase= self._expand(lowerCAmelCase )
__lowercase= data_bp_input
__lowercase= np.dot(lowerCAmelCase , self.vji.T ) - self.thre_bpa
__lowercase= self.sig(lowerCAmelCase )
__lowercase= np.dot(lowerCAmelCase , self.wkj.T ) - self.thre_bpa
__lowercase= self.sig(lowerCAmelCase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
__lowercase= np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCAmelCase , (1 - bp_outa) ) )
__lowercase= np.multiply(
np.dot(lowerCAmelCase , self.wkj ) , np.multiply(lowerCAmelCase , (1 - bp_outa) ) )
__lowercase= np.dot(lowerCAmelCase , self.vji )
__lowercase= pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowercase= pd_conva_pooled.T.getA().tolist()
__lowercase= self._calculate_gradient_from_pool(
lowerCAmelCase , lowerCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__lowercase= self._expand_mat(pd_conva_all[k_conv] )
__lowercase= self.rate_weight * np.dot(lowerCAmelCase , lowerCAmelCase )
__lowercase= self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__lowercase= (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__lowercase= self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowercase= self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowercase= self.thre_bpa - pd_k_all * self.rate_thre
__lowercase= self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error for this single image
__lowercase= np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowercase= rp + 1
__lowercase= error_count / patterns
all_mse.append(lowerCAmelCase )
def draw_error():
__lowercase= [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCAmelCase , '+-' )
plt.plot(lowerCAmelCase , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(lowerCAmelCase , alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def _A (self , lowerCAmelCase ):
# model prediction
__lowercase= []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(lowerCAmelCase )) )
for p in range(len(lowerCAmelCase ) ):
__lowercase= np.asmatrix(datas_test[p] )
__lowercase, __lowercase= self.convolute(
lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase= self.pooling(lowerCAmelCase , self.size_poolinga )
__lowercase= self._expand(lowerCAmelCase )
__lowercase= data_bp_input
__lowercase= bp_outa * self.vji.T - self.thre_bpa
__lowercase= self.sig(lowerCAmelCase )
__lowercase= bp_outa * self.wkj.T - self.thre_bpa
__lowercase= self.sig(lowerCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
__lowercase= [list(map(self.do_round , lowerCAmelCase ) ) for each in produce_out]
return np.asarray(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
# return the image data after the convolution process so it can be checked
__lowercase= np.asmatrix(lowerCAmelCase )
__lowercase, __lowercase= self.convolute(
lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase= self.pooling(lowerCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304
| 0
|
import argparse
lowerCAmelCase = '''docs/source/_static/js/custom.js'''
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ , encoding='utf-8' , newline='\n' ) as f:
__lowercase= f.readlines()
__lowercase= 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
__lowercase= F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
lowerCAmelCase = parser.parse_args()
update_custom_js(args.version)
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
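            # Rewrite the file line by line: drop TensorFlow-only imports, apply the regex
            # conversions from TO_CONVERT and flag lines that still need manual attention.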
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {utils_file} to {dest_folder}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
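# Listing these modules as pytest plugins makes their fixtures available to every test file
# without explicit imports.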
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Tuple:
    '''Add the "unit" marker to every collected test that is not already marked as integration or unit.'''
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
    '''Register the custom "torchaudio_latest" pytest marker.'''
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> int:
    '''Redirect every datasets cache directory to a temporary location so tests never touch the real user cache.'''
__lowercase= tmp_path_factory.getbasetemp() / 'cache'
__lowercase= test_hf_cache_home / 'datasets'
__lowercase= test_hf_cache_home / 'metrics'
__lowercase= test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(lowercase__ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(lowercase__ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(lowercase__ ) )
__lowercase= test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(lowercase__ ) )
__lowercase= test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowercase__ ) )
@pytest.fixture(autouse=lowercase__ , scope='session' )
def _lowerCamelCase( ) -> Optional[int]:
    '''Disable tqdm progress bars for the whole test session.'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase__ )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , lowercase__ )
@pytest.fixture
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , lowercase__ )
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
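        # ALBERT shares parameters across layers: the hidden layers are split into
        # num_hidden_groups groups, each repeating inner_group_num inner layers.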
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
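        # Every ONNX input shares the same dynamic axes; multiple-choice adds an extra
        # 'choice' dimension between the batch and sequence axes.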
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 304
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
lowerCAmelCase = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
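# Model size tag -> number of blocks / hidden size, used to build the RwkvConfig when the
# size is passed explicitly or inferred from the checkpoint name.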
def _lowerCamelCase( lowercase__ ) -> Tuple:
    '''Rename the keys of an original RWKV state dict so that they match the Hugging Face Rwkv layout.'''
__lowercase= list(state_dict.keys() )
for name in state_dict_keys:
__lowercase= state_dict.pop(lowercase__ )
# emb -> embedding
if name.startswith('emb.' ):
__lowercase= name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__lowercase= name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__lowercase= re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , lowercase__ )
# ffn -> feed_forward
__lowercase= re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , lowercase__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__lowercase= name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__lowercase= name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
__lowercase= name.replace('.time_mix_r' , '.time_mix_receptance' )
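        # Every parameter except the language-model head lives under the 'rwkv.' prefix in
        # the converted model.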
if name != "head.weight":
__lowercase= 'rwkv.' + name
__lowercase= weight
return state_dict
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=None ) -> Union[str, Any]:
    '''Download an RWKV checkpoint from the Hub, convert it to the Hugging Face format, shard it and optionally push it.'''
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__lowercase= 5_0_2_7_7
__lowercase= AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__lowercase= PreTrainedTokenizerFast(tokenizer_file=lowercase__ )
__lowercase= len(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
# 2. Build the config
__lowercase= list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowercase= candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
__lowercase= RwkvConfig(
vocab_size=lowercase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowercase__ )
# 3. Download model file then convert state_dict
__lowercase= hf_hub_download(lowercase__ , lowercase__ )
__lowercase= torch.load(lowercase__ , map_location='cpu' )
__lowercase= convert_state_dict(lowercase__ )
# 4. Split in shards and save
__lowercase, __lowercase= shard_checkpoint(lowercase__ )
for shard_file, shard in shards.items():
torch.save(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
if index is not None:
__lowercase= os.path.join(lowercase__ , lowercase__ )
# Save the index as well
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
__lowercase= json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '\n'
f.write(lowercase__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take as much space as the whole state_dict).
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case don\'t worry, you still have converted the model.' )
__lowercase= list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowercase= torch.load(os.path.join(lowercase__ , lowercase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase__ , lowercase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__lowercase= AutoModelForCausalLM.from_pretrained(lowercase__ )
model.push_to_hub(lowercase__ , max_shard_size='2GB' )
tokenizer.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
    '''Download the BLIP demo image and preprocess it into a normalized tensor of shape (1, 3, image_size, image_size).'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
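    # The resize and normalization values mirror the preprocessing of the original BLIP repository.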
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
    '''Map an original BLIP parameter name onto the Hugging Face naming scheme.'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
    '''Convert the BLIP captioning, VQA and image-text matching checkpoints and sanity-check their outputs.'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class A ( A_ ):
UpperCamelCase_ : str ='''speech_to_text'''
UpperCamelCase_ : List[Any] =['''past_key_values''']
UpperCamelCase_ : Any ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
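    # Maps the attribute names used generically across the library onto this model's own
    # parameter names (e.g. hidden_size -> d_model).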
def __init__(self , lowerCAmelCase=1_0_0_0_0 , lowerCAmelCase=1_2 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=4 , lowerCAmelCase=6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=4 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=2_5_6 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=6_0_0_0 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=2 , lowerCAmelCase=(5, 5) , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8_0 , lowerCAmelCase=1 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase= max_source_positions
__lowercase= max_target_positions
__lowercase= num_conv_layers
__lowercase= list(lowerCAmelCase )
__lowercase= conv_channels
__lowercase= input_feat_per_channel
__lowercase= input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.' )
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
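        # Each value is pushed at the head of the list; iterating the input in reverse
        # sorted order therefore leaves the linked list sorted in ascending order.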
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
    '''Merge two iterables of integers into a single sorted linked list.'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A :
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={
'''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A_ )} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase_ : str =field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _A (self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class A :
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase_ : Optional[int] =field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase_ : Optional[int] =field(
default=A_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase_ : Optional[int] =field(
default=A_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase_ : float =field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _A (self ):
if self.train_file is not None:
__lowercase= self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowercase= self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
    '''Read a whole word masking reference file (one JSON list per line) and attach it to the dataset as an extra column.'''
with open(lowercase__ , 'r' , encoding='utf-8' ) as f:
__lowercase= [json.loads(lowercase__ ) for line in f.read().splitlines() if (len(lowercase__ ) > 0 and not line.isspace())]
assert len(lowercase__ ) == len(lowercase__ )
__lowercase= {c: dataset[c] for c in dataset.column_names}
__lowercase= refs
return Dataset.from_dict(lowercase__ )
def _lowerCamelCase( ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase, __lowercase, __lowercase= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowercase= None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase= get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overwrite it.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowercase= load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowercase= load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
__lowercase= load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
__lowercase= {}
if data_args.train_file is not None:
__lowercase= data_args.train_file
if data_args.validation_file is not None:
__lowercase= data_args.validation_file
__lowercase= data_args.train_file.split('.' )[-1]
if extension == "txt":
__lowercase= 'text'
__lowercase= load_dataset(lowercase__ , data_files=lowercase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase= AutoConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
__lowercase= AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__lowercase= CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
__lowercase= {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowercase= AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ )
elif model_args.model_name_or_path:
__lowercase= AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
__lowercase= AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__lowercase= AutoModelForMaskedLM.from_config(lowercase__ )
model.resize_token_embeddings(len(lowercase__ ) )
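    # Keep the embedding matrix in sync with the tokenizer vocabulary (this matters when the
    # model is trained from scratch or the tokenizer differs from the checkpoint's).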
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowercase= datasets['train'].column_names
else:
__lowercase= datasets['validation'].column_names
__lowercase= 'text' if 'text' in column_names else column_names[0]
__lowercase= 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(lowercase__ ):
# Remove empty lines
__lowercase= [line for line in examples['text'] if len(lowercase__ ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length )
__lowercase= datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__lowercase= add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowercase= add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__lowercase= data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowercase= False
# Data collator
# This one will take care of randomly masking the tokens.
__lowercase= DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase= Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowercase= last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowercase= model_args.model_name_or_path
else:
__lowercase= None
__lowercase= trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowercase= os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= math.exp(eval_output['eval_loss'] )
__lowercase= perplexity
__lowercase= os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
    '''Approximate the area under fnc between x_start and x_end with the trapezoidal rule using `steps` subdivisions.'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
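    # Sum the areas of `steps` trapezoids of equal width spanning [x_start, x_end].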
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
| 304
| 0
|
from collections import deque
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= process_name # process name
__lowercase= arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__lowercase= arrival_time
__lowercase= burst_time # remaining burst time
__lowercase= 0 # total time of the process wait in ready queue
__lowercase= 0 # time from arrival time to completion time
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# total number of mlfq's queues
__lowercase= number_of_queues
# time slice of queues that round robin algorithm applied
__lowercase= time_slices
# unfinished process is in this ready_queue
__lowercase= queue
# current time
__lowercase= current_time
# finished process is in this sequence queue
__lowercase= deque()
def _A (self ):
__lowercase= []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _A (self , lowerCAmelCase ):
__lowercase= []
for i in range(len(lowerCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _A (self , lowerCAmelCase ):
__lowercase= []
for i in range(len(lowerCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _A (self , lowerCAmelCase ):
__lowercase= []
for i in range(len(lowerCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _A (self , lowerCAmelCase ):
return [q.burst_time for q in queue]
def _A (self , lowerCAmelCase ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _A (self , lowerCAmelCase ):
__lowercase= deque() # sequence deque of finished process
while len(lowerCAmelCase ) != 0:
__lowercase= ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowerCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__lowercase= 0
# set the process's turnaround time because it is finished
__lowercase= self.current_time - cp.arrival_time
# set the completion time
__lowercase= self.current_time
# add the process to queue that has finished queue
finished.append(lowerCAmelCase )
self.finish_queue.extend(lowerCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowerCAmelCase ) ):
__lowercase= ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowerCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__lowercase= self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowerCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__lowercase= 0
# set the finish time
__lowercase= self.current_time
# update the process' turnaround time because it is finished
__lowercase= self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowerCAmelCase )
self.finish_queue.extend(lowerCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _A (self ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__lowercase, __lowercase= self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase = Process('''P1''', 0, 5_3)
lowerCAmelCase = Process('''P2''', 0, 1_7)
lowerCAmelCase = Process('''P3''', 0, 6_8)
lowerCAmelCase = Process('''P4''', 0, 2_4)
lowerCAmelCase = 3
lowerCAmelCase = [1_7, 2_5]
lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase = Process('''P1''', 0, 5_3)
lowerCAmelCase = Process('''P2''', 0, 1_7)
lowerCAmelCase = Process('''P3''', 0, 6_8)
lowerCAmelCase = Process('''P4''', 0, 2_4)
lowerCAmelCase = 3
lowerCAmelCase = [1_7, 2_5]
lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
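            # XLMForQuestionAnswering expects additional position labels, so supply dummy
            # all-zero tensors whenever labels are requested for this model class.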
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
from __future__ import annotations
import math
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , lowercase__ , lowercase__ , lowercase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase__ , lowercase__ , lowercase__ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , lowercase__ , lowercase__ , lowercase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase__ , lowercase__ , lowercase__ ) , )
)
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
__lowercase= math.log(len(lowercase__ ) , 2 )
print(F'Optimal value : {minimax(0 , 0 , lowercase__ , lowercase__ , lowercase__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 356
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
lowerCAmelCase = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 357
|
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 304
| 0
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Tuple:
'''simple docstring'''
return torch.atana(lowercase__ , lowercase__ ) / math.pi * 2
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= torch.sin(t * math.pi / 2 ) ** 2
__lowercase= (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowercase__ , lowercase__ )
class A ( A_ ):
pass
class A ( nn.Module ):
def __init__(self , lowerCAmelCase ):
super().__init__()
__lowercase= DiffusionAttnUnetaD(lowerCAmelCase , n_attn_layers=4 )
__lowercase= deepcopy(self.diffusion )
__lowercase= torch.quasirandom.SobolEngine(1 , scramble=lowerCAmelCase )
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= MODELS_MAP[model_name]['url']
os.system(F'wget {url} ./' )
return F'./{model_name}.ckpt'
lowerCAmelCase = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowercase__ ) and not isinstance(lowercase__ , lowercase__ ):
return name.replace(lowercase__ , lowercase__ )
elif name.startswith(lowercase__ ):
return [name.replace(lowercase__ , lowercase__ ) for v in value]
raise ValueError(F'Attn error with {name}' )
def _lowerCamelCase( lowercase__ , lowercase__=1_3 ) -> Dict:
'''simple docstring'''
__lowercase= input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
__lowercase= 0
if string.startswith('net.3.' ):
depth += 1
__lowercase= string[6:]
elif string.startswith('net.' ):
__lowercase= string[4:]
while string.startswith('main.7.' ):
depth += 1
__lowercase= string[7:]
if string.startswith('main.' ):
__lowercase= string[5:]
# mid block
if string[:2].isdigit():
__lowercase= string[:2]
__lowercase= string[2:]
else:
__lowercase= string[0]
__lowercase= string[1:]
if depth == max_depth:
__lowercase= MID_NUM_TO_LAYER[layer_num]
__lowercase= 'mid_block'
elif depth > 0 and int(lowercase__ ) < 7:
__lowercase= DOWN_NUM_TO_LAYER[layer_num]
__lowercase= F'down_blocks.{depth}'
elif depth > 0 and int(lowercase__ ) > 7:
__lowercase= UP_NUM_TO_LAYER[layer_num]
__lowercase= F'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
__lowercase= DEPTH_0_TO_LAYER[layer_num]
__lowercase= F'up_blocks.{max_depth - 1}' if int(lowercase__ ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
__lowercase= string_left[1:]
if "resnets" in new_layer:
__lowercase= convert_resconv_naming(lowercase__ )
elif "attentions" in new_layer:
__lowercase= convert_attn_naming(lowercase__ )
__lowercase= new_string_left
if not isinstance(lowercase__ , lowercase__ ):
__lowercase= prefix + '.' + new_layer + '.' + string_left
else:
__lowercase= [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
__lowercase= rename(lowercase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowercase__ , lowercase__ ):
__lowercase= transform_conv_attns(lowercase__ , lowercase__ , lowercase__ )
else:
__lowercase= v
return new_state_dict
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
if len(lowercase__ ) == 1:
if len(v.shape ) == 3:
# weight
__lowercase= v[:, :, 0]
else:
# bias
__lowercase= v
else:
# qkv matrices
__lowercase= v.shape[0]
__lowercase= trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__lowercase= v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__lowercase= v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__lowercase= args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
__lowercase= download(lowercase__ )
__lowercase= MODELS_MAP[model_name]['sample_rate']
__lowercase= MODELS_MAP[model_name]['sample_size']
__lowercase= Object()
__lowercase= sample_size
__lowercase= sample_rate
__lowercase= 0
__lowercase= UNetaDModel(sample_size=lowercase__ , sample_rate=lowercase__ )
__lowercase= diffusers_model.state_dict()
__lowercase= DiffusionUncond(lowercase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowercase__ )['state_dict'] )
__lowercase= orig_model.diffusion_ema.eval()
__lowercase= orig_model.state_dict()
__lowercase= rename_orig_weights(lowercase__ )
__lowercase= set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__lowercase= set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowercase__ ) == 0, F'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('kernel' ) for k in list(lowercase__ ) ), F'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
__lowercase= value.squeeze()
__lowercase= value
diffusers_model.load_state_dict(lowercase__ )
__lowercase= 1_0_0
__lowercase= 3_3
__lowercase= IPNDMScheduler(num_train_timesteps=lowercase__ )
__lowercase= torch.manual_seed(lowercase__ )
__lowercase= torch.randn([1, 2, config.sample_size] , generator=lowercase__ ).to(lowercase__ )
__lowercase= torch.linspace(1 , 0 , steps + 1 , device=lowercase__ )[:-1]
__lowercase= get_crash_schedule(lowercase__ )
__lowercase= DanceDiffusionPipeline(unet=lowercase__ , scheduler=lowercase__ )
__lowercase= torch.manual_seed(3_3 )
__lowercase= pipe(num_inference_steps=lowercase__ , generator=lowercase__ ).audios
__lowercase= sampling.iplms_sample(lowercase__ , lowercase__ , lowercase__ , {} )
__lowercase= generated.clamp(-1 , 1 )
__lowercase= (generated - audio).abs().sum()
__lowercase= (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , lowercase__ )
print('Diff max' , lowercase__ )
assert diff_max < 1E-3, F'Diff max: {diff_max} is too much :-/'
print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase = parser.parse_args()
main(args)
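# A minimal, self-contained sketch of the core idea in the conversion script above: walking a
# checkpoint state dict and rewriting each key through a prefix mapping. The mapping and tensor
# below are hypothetical placeholders for illustration, not the real layer names used above.
import torch


def rename_keys_sketch(state_dict, prefix_map):
    # Rewrite the first matching prefix of every key; unmatched keys pass through unchanged.
    renamed = {}
    for key, value in state_dict.items():
        new_key = key
        for old_prefix, new_prefix in prefix_map.items():
            if new_key.startswith(old_prefix):
                new_key = new_prefix + new_key[len(old_prefix) :]
                break
        renamed[new_key] = value
    return renamed


_demo_map = {"net.0.": "down_blocks.0.resnets.0."}
_demo_state = {"net.0.conv.weight": torch.zeros(4, 4, 3)}
print(list(rename_keys_sketch(_demo_state, _demo_map)))  # ['down_blocks.0.resnets.0.conv.weight']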
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
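# A small numpy-only sketch of the padding/masking pattern used in __call__ above: spectrograms of
# different lengths are padded to the longest patch grid in the batch, and a 0/1 mask marks which
# patches hold real audio. The patch and feature sizes below are illustrative assumptions.
import numpy as np
from math import ceil


def pad_and_mask(features, patch_time=16, freq_len=8, padding_value=0.0):
    max_patches = max(ceil(f.shape[0] / patch_time) * freq_len for f in features)
    max_time = max_patches // freq_len * patch_time
    batch = np.full((len(features), max_time, features[0].shape[1]), padding_value, dtype=np.float32)
    mask = np.zeros((len(features), max_patches), dtype=np.float32)
    for i, f in enumerate(features):
        batch[i, : f.shape[0]] = f
        mask[i, : ceil(f.shape[0] / patch_time) * freq_len] = 1.0
    return batch, mask


_feats = [np.random.rand(40, 128).astype(np.float32), np.random.rand(64, 128).astype(np.float32)]
_values, _mask = pad_and_mask(_feats)
print(_values.shape, _mask.shape)  # (2, 64, 128) (2, 32)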
| 304
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A ( A_ ):
@staticmethod
@abstractmethod
def _A (lowerCAmelCase ):
raise NotImplementedError()
@abstractmethod
def _A (self ):
raise NotImplementedError()
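# A hypothetical sketch of how a concrete command built on a base class like the one above is
# typically wired up: the subclass registers its own sub-parser and implements run(). Every name
# below (EchoCommand, "echo", factory) is illustrative and not taken from this file.
from argparse import ArgumentParser


class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo", help="print a message")
        sub.add_argument("message", type=str)
        sub.set_defaults(factory=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)


_parser = ArgumentParser("cli-demo")
EchoCommand.register_subcommand(_parser.add_subparsers())
_args = _parser.parse_args(["echo", "hello"])
_args.factory(_args).run()  # prints "hello"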
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 304
| 0
|
from collections.abc import Callable
import numpy as np
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> np.ndarray:
'''simple docstring'''
__lowercase= int(np.ceil((x_end - xa) / step_size ) )
__lowercase= np.zeros((n + 1,) )
__lowercase= ya
__lowercase= xa
for k in range(lowercase__ ):
__lowercase= y[k] + step_size * ode_func(lowercase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
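# A quick self-contained check of the same forward (explicit) Euler scheme, independent of the
# names above: integrating y' = y from y(0) = 1 up to x = 1 should approach e ≈ 2.71828 as the
# step size shrinks. The step sizes are arbitrary illustration values.
def _euler_demo(step_size):
    y = 1.0
    for _ in range(round(1.0 / step_size)):
        y = y + step_size * y  # one forward-Euler step of y' = y
    return y


print(_euler_demo(0.01))   # ~2.7048, a little under e (the method is first-order accurate)
print(_euler_demo(0.001))  # ~2.7169, closer to e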
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device, so we use cpu, which is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
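# A short sketch of the noise schedule that set_sigmas builds above: sigma interpolates
# geometrically from sigma_max down to sigma_min as the continuous time t runs from 1 toward
# sampling_eps. The constants are the class defaults, used here purely for illustration.
import torch

sigma_min, sigma_max, sampling_eps, num_steps = 0.01, 1348.0, 1e-5, 5
t_grid = torch.linspace(1, sampling_eps, num_steps)
demo_sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** float(t) for t in t_grid])
print(demo_sigmas)  # starts near 1348 (sigma_max) and decays toward 0.01 (sigma_min)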
| 304
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 0
|
class A :
def __init__(self , lowerCAmelCase = "" , lowerCAmelCase = False ):
# Mapping from the first character of the prefix of the node
__lowercase= {}
# A node will be a leaf if the tree contains its word
__lowercase= is_leaf
__lowercase= prefix
def _A (self , lowerCAmelCase ):
__lowercase= 0
for q, w in zip(self.prefix , lowerCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _A (self , lowerCAmelCase ):
for word in words:
self.insert(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__lowercase= True
        # Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__lowercase= RadixNode(prefix=lowerCAmelCase , is_leaf=lowerCAmelCase )
else:
__lowercase= self.nodes[word[0]]
__lowercase, __lowercase, __lowercase= incoming_node.match(
lowerCAmelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase )
            # Case 4: The node prefix only partially matches the word
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__lowercase= remaining_prefix
__lowercase= self.nodes[matching_string[0]]
__lowercase= RadixNode(lowerCAmelCase , lowerCAmelCase )
__lowercase= aux_node
if remaining_word == "":
__lowercase= True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= self.nodes.get(word[0] , lowerCAmelCase )
if not incoming_node:
return False
else:
__lowercase, __lowercase, __lowercase= incoming_node.match(
lowerCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= self.nodes.get(word[0] , lowerCAmelCase )
if not incoming_node:
return False
else:
__lowercase, __lowercase, __lowercase= incoming_node.match(
lowerCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__lowercase= list(self.nodes.values() )[0]
__lowercase= merging_node.is_leaf
self.prefix += merging_node.prefix
__lowercase= merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__lowercase= False
# If there is 1 edge, we merge it with its child
else:
__lowercase= list(incoming_node.nodes.values() )[0]
__lowercase= merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__lowercase= merging_node.nodes
return True
def _A (self , lowerCAmelCase = 0 ):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def _lowerCamelCase( ) -> bool:
'''simple docstring'''
__lowercase= 'banana bananas bandana band apple all beast'.split()
__lowercase= RadixNode()
root.insert_many(lowercase__ )
assert all(root.find(lowercase__ ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def _lowerCamelCase( ) -> None:
'''simple docstring'''
assert test_trie()
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= RadixNode()
__lowercase= 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowercase__ )
print('Words:' , lowercase__ )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
@property
def _A (self ):
torch.manual_seed(0 )
__lowercase= UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _A (self ):
__lowercase= self.dummy_uncond_unet
__lowercase= KarrasVeScheduler()
__lowercase= KarrasVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe(num_inference_steps=2 , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= torch.manual_seed(0 )
__lowercase= pipe(num_inference_steps=2 , generator=lowerCAmelCase , output_type='numpy' , return_dict=lowerCAmelCase )[0]
__lowercase= image[0, -3:, -3:, -1]
__lowercase= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase= np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= 'google/ncsnpp-celebahq-256'
__lowercase= UNetaDModel.from_pretrained(lowerCAmelCase )
__lowercase= KarrasVeScheduler()
__lowercase= KarrasVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe(num_inference_steps=2_0 , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__lowercase= np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 363
|
import math
from datetime import datetime, timedelta
def _lowerCamelCase( lowercase__ ) -> datetime:
'''simple docstring'''
__lowercase= year % 1_9
__lowercase= year % 4
__lowercase= year % 7
__lowercase= math.floor(year / 1_0_0 )
__lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__lowercase= leap_day_inhibits / 4
__lowercase= (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__lowercase= (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_8 )
else:
return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 304
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
        # If both the encoder and decoder layer counts are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
        # Generate dummy inputs according to the computed batch and sequence lengths
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365
|
from math import factorial, radians
def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
'''simple docstring'''
__lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowercase= radians(lowercase__ )
__lowercase= angle_in_radians
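    # Maclaurin expansion: sin(x) = x - x**3/3! + x**5/5! - x**7/7! + ...
    # The accumulator starts at the first term (x); each loop iteration adds the next term.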
__lowercase= 3
__lowercase= -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
__lowercase= -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 304
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _lowerCamelCase( lowercase__ , lowercase__=False , lowercase__=False ) -> List[str]:
'''simple docstring'''
__lowercase= 'backbone.' if is_semantic else ''
__lowercase= []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', 'beit.embeddings.cls_token'),
(F'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'{prefix}pos_embed', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
__lowercase= 'backbone.' if is_semantic else ''
# queries, keys and values
__lowercase= state_dict.pop(F'{prefix}blocks.{i}.attn.qkv.weight' )
__lowercase= state_dict.pop(F'{prefix}blocks.{i}.attn.q_bias' )
__lowercase= state_dict.pop(F'{prefix}blocks.{i}.attn.v_bias' )
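        # The original checkpoint stores query/key/value as a single fused qkv matrix; the slices
        # below split it into three hidden_size-sized blocks: queries, then keys, then values.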
__lowercase= in_proj_weight[
: config.hidden_size, :
]
__lowercase= q_bias
__lowercase= in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase= in_proj_weight[
-config.hidden_size :, :
]
__lowercase= v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase= state_dict.pop(F'{prefix}blocks.{i}.gamma_1' )
__lowercase= state_dict.pop(F'{prefix}blocks.{i}.gamma_2' )
__lowercase= gamma_a
__lowercase= gamma_a
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= dct.pop(lowercase__ )
__lowercase= val
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=False ) -> str:
'''simple docstring'''
__lowercase= False if 'rvlcdip' in checkpoint_url else True
__lowercase= BeitConfig(use_absolute_position_embeddings=lowercase__ , use_mask_token=lowercase__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase= 1_0_2_4
__lowercase= 4_0_9_6
__lowercase= 2_4
__lowercase= 1_6
# labels
if "rvlcdip" in checkpoint_url:
__lowercase= 1_6
__lowercase= 'huggingface/label-files'
__lowercase= 'rvlcdip-id2label.json'
__lowercase= json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
__lowercase= {int(lowercase__ ): v for k, v in idalabel.items()}
__lowercase= idalabel
__lowercase= {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase= torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['model']
__lowercase= create_rename_keys(lowercase__ , has_lm_head=lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , has_lm_head=lowercase__ )
# load HuggingFace model
__lowercase= BeitForMaskedImageModeling(lowercase__ ) if has_lm_head else BeitForImageClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image
__lowercase= BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowercase__ )
__lowercase= prepare_img()
__lowercase= image_processor(images=lowercase__ , return_tensors='pt' )
__lowercase= encoding['pixel_values']
__lowercase= model(lowercase__ )
__lowercase= outputs.logits
# verify logits
__lowercase= [1, 1_6] if 'rvlcdip' in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(lowercase__ ), "Shape of logits not as expected"
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if push_to_hub:
if has_lm_head:
__lowercase= 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
__lowercase= 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowercase__ , )
model.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowercase__ , )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCAmelCase = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
import re
def _lowerCamelCase( lowercase__ ) -> list:
'''simple docstring'''
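    # Split on any character that is not a letter, digit or whitespace, then break each
    # resulting chunk into its individual words (returns a list of word lists).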
return [char.split() for char in re.split(R'[^ a-z A-Z 0-9 \s]' , str_ )]
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
__lowercase= split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
try:
__lowercase= split_input(lowercase__ )
if upper:
__lowercase= ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__lowercase= ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
return to_simple_case(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
try:
__lowercase= to_simple_case(lowercase__ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
return to_complex_case(lowercase__ , lowercase__ , '_' )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
return to_complex_case(lowercase__ , lowercase__ , '-' )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 367
|
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> np.ndarray:
'''simple docstring'''
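    # Element-wise ReLU: negative entries are clamped to 0, non-negative entries pass through.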
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class A ( A_ ):
UpperCamelCase_ : Union[str, Any] ='''swinv2'''
UpperCamelCase_ : List[str] ={
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self , lowerCAmelCase=2_2_4 , lowerCAmelCase=4 , lowerCAmelCase=3 , lowerCAmelCase=9_6 , lowerCAmelCase=[2, 2, 6, 2] , lowerCAmelCase=[3, 6, 1_2, 2_4] , lowerCAmelCase=7 , lowerCAmelCase=4.0 , lowerCAmelCase=True , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase="gelu" , lowerCAmelCase=False , lowerCAmelCase=0.02 , lowerCAmelCase=1E-5 , lowerCAmelCase=3_2 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
__lowercase= image_size
__lowercase= patch_size
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= depths
__lowercase= len(lowerCAmelCase )
__lowercase= num_heads
__lowercase= window_size
__lowercase= mlp_ratio
__lowercase= qkv_bias
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= drop_path_rate
__lowercase= hidden_act
__lowercase= use_absolute_embeddings
__lowercase= layer_norm_eps
__lowercase= initializer_range
__lowercase= encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase= int(embed_dim * 2 ** (len(lowerCAmelCase ) - 1) )
__lowercase= (0, 0, 0, 0)
| 368
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
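    # Sum the decimal digits of 2**power (Project Euler problem 16: 2**15 = 32768 and
    # 3 + 2 + 7 + 6 + 8 = 26; the default asks for 2**1000).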
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
| 304
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class A ( A_ ):
UpperCamelCase_ : Dict ='''deformable_detr'''
UpperCamelCase_ : Optional[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__(self , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=3 , lowerCAmelCase=3_0_0 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8 , lowerCAmelCase=6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=2_5_6 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase="sine" , lowerCAmelCase="resnet50" , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=False , lowerCAmelCase=3_0_0 , lowerCAmelCase=False , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.25 , lowerCAmelCase=False , **lowerCAmelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__lowercase= CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= backbone_config.get('model_type' )
__lowercase= CONFIG_MAPPING[backbone_model_type]
__lowercase= config_class.from_dict(lowerCAmelCase )
__lowercase= use_timm_backbone
__lowercase= backbone_config
__lowercase= num_channels
__lowercase= num_queries
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= init_xavier_std
__lowercase= encoder_layerdrop
__lowercase= auxiliary_loss
__lowercase= position_embedding_type
__lowercase= backbone
__lowercase= use_pretrained_backbone
__lowercase= dilation
# deformable attributes
__lowercase= num_feature_levels
__lowercase= encoder_n_points
__lowercase= decoder_n_points
__lowercase= two_stage
__lowercase= two_stage_num_proposals
__lowercase= with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
__lowercase= class_cost
__lowercase= bbox_cost
__lowercase= giou_cost
# Loss coefficients
__lowercase= mask_loss_coefficient
__lowercase= dice_loss_coefficient
__lowercase= bbox_loss_coefficient
__lowercase= giou_loss_coefficient
__lowercase= eos_coefficient
__lowercase= focal_alpha
__lowercase= disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase )
@property
def _A (self ):
return self.encoder_attention_heads
@property
def _A (self ):
return self.d_model
def _A (self ):
__lowercase= copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase= self.backbone_config.to_dict()
__lowercase= self.__class__.model_type
return output
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        F'16-bit training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
| 304
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class A ( datasets.BuilderConfig ):
UpperCamelCase_ : Optional[datasets.Features] =None
UpperCamelCase_ : str ="utf-8"
UpperCamelCase_ : Optional[str] =None
UpperCamelCase_ : Optional[str] =None
UpperCamelCase_ : bool =True # deprecated
UpperCamelCase_ : Optional[int] =None # deprecated
UpperCamelCase_ : int =10 << 20 # 10MB
UpperCamelCase_ : Optional[bool] =None
class A ( datasets.ArrowBasedBuilder ):
UpperCamelCase_ : List[str] =JsonConfig
def _A (self ):
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
__lowercase= self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def _A (self , lowerCAmelCase ):
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__lowercase= dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase , (str, list, tuple) ):
__lowercase= data_files
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= [files]
__lowercase= [dl_manager.iter_files(lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__lowercase= []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= [files]
__lowercase= [dl_manager.iter_files(lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase , gen_kwargs={'files': files} ) )
return splits
def _A (self , lowerCAmelCase ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__lowercase= self.config.features.arrow_schema.field(lowerCAmelCase ).type
__lowercase= pa_table.append_column(lowerCAmelCase , pa.array([None] * len(lowerCAmelCase ) , type=lowerCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowercase= table_cast(lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def _A (self , lowerCAmelCase ):
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowercase= json.load(lowerCAmelCase )
# We keep only the field we are interested in
__lowercase= dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(lowerCAmelCase , (list, tuple) ):
__lowercase= set().union(*[row.keys() for row in dataset] )
__lowercase= {col: [row.get(lowerCAmelCase ) for row in dataset] for col in keys}
else:
__lowercase= dataset
__lowercase= pa.Table.from_pydict(lowerCAmelCase )
yield file_idx, self._cast_table(lowerCAmelCase )
# If the file has one json object per line
else:
with open(lowerCAmelCase , 'rb' ) as f:
__lowercase= 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__lowercase= max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
__lowercase= (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
__lowercase= f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__lowercase= batch.decode(self.config.encoding , errors=lowerCAmelCase ).encode('utf-8' )
try:
while True:
try:
__lowercase= paj.read_json(
io.BytesIO(lowerCAmelCase ) , read_options=paj.ReadOptions(block_size=lowerCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(lowerCAmelCase )
or block_size > len(lowerCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(lowerCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowercase= json.load(lowerCAmelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCAmelCase , lowerCAmelCase ): # list is the only sequence type supported in JSON
try:
__lowercase= set().union(*[row.keys() for row in dataset] )
__lowercase= {col: [row.get(lowerCAmelCase ) for row in dataset] for col in keys}
__lowercase= pa.Table.from_pydict(lowerCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(lowerCAmelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase )
batch_idx += 1
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 0
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class A ( A_ , A_ ):
"""simple docstring"""
UpperCamelCase_ : str ='''pixel_values'''
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[Any] =TimmBackboneConfig
def __init__(self , lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase )
__lowercase= config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(lowerCAmelCase , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowercase= getattr(lowerCAmelCase , 'use_pretrained_backbone' , lowerCAmelCase )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowercase= config.out_indices if getattr(lowerCAmelCase , 'out_indices' , lowerCAmelCase ) is not None else (-1,)
__lowercase= timm.create_model(
config.backbone , pretrained=lowerCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase , **lowerCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowercase= self._backbone.return_layers
__lowercase= {layer['module']: str(lowerCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase )
@classmethod
def _A (cls , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowercase= kwargs.pop('config' , TimmBackboneConfig() )
__lowercase= kwargs.pop('use_timm_backbone' , lowerCAmelCase )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowercase= kwargs.pop('num_channels' , config.num_channels )
__lowercase= kwargs.pop('features_only' , config.features_only )
__lowercase= kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowercase= kwargs.pop('out_indices' , config.out_indices )
__lowercase= TimmBackboneConfig(
backbone=lowerCAmelCase , num_channels=lowerCAmelCase , features_only=lowerCAmelCase , use_pretrained_backbone=lowerCAmelCase , out_indices=lowerCAmelCase , )
return super()._from_config(lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase ):
pass
def _A (self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= return_dict if return_dict is not None else self.config.use_return_dict
__lowercase= (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase= output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowercase= self._all_layers
__lowercase= self._backbone(lowerCAmelCase , **lowerCAmelCase )
__lowercase= self._return_layers
__lowercase= tuple(hidden_states[i] for i in self.out_indices )
else:
__lowercase= self._backbone(lowerCAmelCase , **lowerCAmelCase )
__lowercase= None
__lowercase= tuple(lowerCAmelCase )
__lowercase= tuple(lowerCAmelCase ) if hidden_states is not None else None
if not return_dict:
__lowercase= (feature_maps,)
if output_hidden_states:
__lowercase= output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase , hidden_states=lowerCAmelCase , attentions=lowerCAmelCase )
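# Illustrative note: when the timm-backed wrapper above is called with pixel values of
# shape (batch_size, num_channels, height, width), it delegates to the underlying timm
# model and returns a BackboneOutput whose ``feature_maps`` tuple holds one tensor per
# entry in ``out_indices`` (only the last stage by default); ``hidden_states`` stays None
# unless hidden states are explicitly requested, and ``attentions`` is always None since
# requesting attentions raises an error before the forward pass runs.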
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
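# Illustrative note on the lazy-import pattern above: importing this package stays cheap,
# because the torch-backed modeling classes listed in _import_structure are only imported
# the first time one of them is actually accessed as an attribute, while static type
# checkers still see the real imports through the TYPE_CHECKING branch.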
| 304
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__lowercase= tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__lowercase= tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
__lowercase= tf_top_k_top_p_filtering(lowerCAmelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
__lowercase= output[output != -float('inf' )]
__lowercase= tf.cast(
tf.where(tf.not_equal(lowerCAmelCase , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(lowerCAmelCase , lowerCAmelCase , rtol=1E-12 )
tf.debugging.assert_equal(lowerCAmelCase , lowerCAmelCase )
@require_tf
class A ( unittest.TestCase , A_ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
UpperCamelCase_ : List[Any] ={
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def _A (self ):
# TF-only test: tf.saved_model export
__lowercase= TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__lowercase= 2
__lowercase= 2
class A ( tf.Module ):
def __init__(self , lowerCAmelCase ):
super(lowerCAmelCase , self ).__init__()
__lowercase= model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=lowerCAmelCase , )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.model.generate(
input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , max_new_tokens=lowerCAmelCase , return_dict_in_generate=lowerCAmelCase , )
return {"sequences": outputs["sequences"]}
__lowercase= [[2, 0], [1_0_2, 1_0_3]]
__lowercase= [[1, 0], [1, 1]]
__lowercase= DummyModel(model=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCAmelCase , lowerCAmelCase , signatures={'serving_default': dummy_model.serving} )
__lowercase= tf.saved_model.load(lowerCAmelCase ).signatures['serving_default']
for batch_size in range(1 , len(lowerCAmelCase ) + 1 ):
__lowercase= {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
__lowercase= serving_func(**lowerCAmelCase )['sequences']
__lowercase= test_model.generate(**lowerCAmelCase , max_new_tokens=lowerCAmelCase )
tf.debugging.assert_equal(lowerCAmelCase , lowerCAmelCase )
@slow
def _A (self ):
# TF-only test: tf.saved_model export
__lowercase= TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__lowercase= 1
__lowercase= 2
class A ( tf.Module ):
def __init__(self , lowerCAmelCase ):
super(lowerCAmelCase , self ).__init__()
__lowercase= model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=lowerCAmelCase , )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.model.generate(
input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , max_new_tokens=lowerCAmelCase , return_dict_in_generate=lowerCAmelCase , )
return {"sequences": outputs["sequences"]}
__lowercase= [[2], [1_0_2, 1_0_3]]
__lowercase= [[1], [1, 1]]
__lowercase= DummyModel(model=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCAmelCase , lowerCAmelCase , signatures={'serving_default': dummy_model.serving} )
__lowercase= tf.saved_model.load(lowerCAmelCase ).signatures['serving_default']
for input_row in range(len(lowerCAmelCase ) ):
__lowercase= {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
__lowercase= serving_func(**lowerCAmelCase )['sequences']
__lowercase= test_model.generate(**lowerCAmelCase , max_new_tokens=lowerCAmelCase )
tf.debugging.assert_equal(lowerCAmelCase , lowerCAmelCase )
@slow
@require_tensorflow_text
def _A (self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=lowerCAmelCase )
class A ( tf.keras.layers.Layer ):
def __init__(self ):
super().__init__()
__lowercase= text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(lowerCAmelCase , 'spiece.model' ) , 'rb' ).read() )
__lowercase= TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
__lowercase= self.tokenizer.tokenize(lowerCAmelCase )
__lowercase, __lowercase= text.pad_model_inputs(
lowerCAmelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
__lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase )
return self.tokenizer.detokenize(lowerCAmelCase )
__lowercase= CompleteSentenceTransformer()
__lowercase= tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
__lowercase= complete_model(lowerCAmelCase )
__lowercase= tf.keras.Model(lowerCAmelCase , lowerCAmelCase )
keras_model.save(lowerCAmelCase )
def _A (self ):
# Has PT equivalent: this test relies on random sampling
__lowercase= {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 1_0,
'temperature': 0.7,
}
__lowercase= 1_4
__lowercase= AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__lowercase= 'Hello, my dog is cute and'
__lowercase= tokenizer(lowerCAmelCase , return_tensors='tf' )
__lowercase= TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__lowercase= 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
__lowercase= model.generate(**lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__lowercase= [6_3_8, 1_9_8]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
__lowercase= model.generate(**lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _A (self ):
# Has PT equivalent: ample use of framework-specific code
__lowercase= AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
__lowercase= 'Hugging Face is a technology company based in New York and Paris.'
__lowercase= bart_tokenizer(lowerCAmelCase , return_tensors='tf' ).input_ids
__lowercase= TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
__lowercase= bart_model.generate(lowerCAmelCase ).numpy()
class A ( A_ ):
def _A (self , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ):
return super().call(lowerCAmelCase , **lowerCAmelCase )
__lowercase= FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
__lowercase= bart_model.generate(lowerCAmelCase , foo='bar' ).numpy()
self.assertTrue(np.array_equal(lowerCAmelCase , lowerCAmelCase ) )
class A ( bart_model.model.encoder.__class__ ):
def _A (self , lowerCAmelCase , **lowerCAmelCase ):
return super().call(lowerCAmelCase , **lowerCAmelCase )
__lowercase= FakeEncoder(bart_model.config , bart_model.model.shared )
__lowercase= fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__lowercase= bart_model.generate(lowerCAmelCase ).numpy()
with self.assertRaises(lowerCAmelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowerCAmelCase , foo='bar' )
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {utils_file} to {dest_folder}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
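# Illustrative invocation of the conversion command defined above (both paths are placeholders):
#   datasets-cli convert --tfds_path ./my_tfds_dataset --datasets_directory ./my_hf_datasets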
| 304
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
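# Illustrative note: for any task other than multiple-choice, the property above resolves to
#   {"input_ids": {0: "batch", 1: "sequence"},
#    "attention_mask": {0: "batch", 1: "sequence"},
#    "token_type_ids": {0: "batch", 1: "sequence"}}
# i.e. every ONNX input shares the same dynamic batch and sequence axes.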
| 304
| 0
|
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
lowerCAmelCase = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def _lowerCamelCase( lowercase__ = "dhaka" , lowercase__ = 5 ) -> int:
'''simple docstring'''
__lowercase= min(lowercase__ , 5_0 ) # Prevent abuse!
__lowercase= {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__lowercase= requests.get('https://www.google.com/search' , params=lowercase__ , headers=lowercase__ )
__lowercase= BeautifulSoup(html.text , 'html.parser' )
__lowercase= ''.join(
re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__lowercase= json.dumps(lowercase__ )
__lowercase= json.loads(lowercase__ )
__lowercase= re.findall(
R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , lowercase__ , )
if not matched_google_image_data:
return 0
__lowercase= re.sub(
R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(lowercase__ ) , )
__lowercase= re.findall(
R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , lowercase__ , )
for index, fixed_full_res_image in enumerate(lowercase__ ):
if index >= max_images:
return index
__lowercase= bytes(lowercase__ , 'ascii' ).decode(
'unicode-escape' )
__lowercase= bytes(lowercase__ , 'ascii' ).decode(
'unicode-escape' )
__lowercase= urllib.request.build_opener()
__lowercase= [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(lowercase__ )
__lowercase= F'query_{query.replace(" " , "_" )}'
if not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
urllib.request.urlretrieve( # noqa: S310
lowercase__ , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
lowerCAmelCase = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
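# Illustrative example: a BLIP checkpoint key such as
#   "visual_encoder.blocks.0.attn.qkv.weight"
# should come out of the function above as
#   "vision_model.encoder.layers.0.self_attn.qkv.weight"
# once the substitutions have been applied in order.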
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Dict:
'''simple docstring'''
__lowercase= AlbertConfig.from_json_file(lowercase__ )
print(F'Building PyTorch model from configuration: {config}' )
__lowercase= AlbertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase__ , lowercase__ , lowercase__ )
    # Save the PyTorch model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
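# Illustrative invocation of the conversion script above (the file name and all paths are
# placeholders, assuming a standard ALBERT TF checkpoint layout):
#   python convert_albert_tf_checkpoint.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin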
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
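# Illustrative note: assuming the constructor sorts its input in descending order before
# prepending each value to the head, the resulting linked list reads in ascending order,
# so merging the two module-level tuples above should print something like
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10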
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
from __future__ import annotations
lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def _lowerCamelCase( lowercase__ ) -> list[float]:
'''simple docstring'''
__lowercase= []
__lowercase= len(lowercase__ )
for i in range(lowercase__ ):
__lowercase= -1
for j in range(i + 1 , lowercase__ ):
if arr[i] < arr[j]:
__lowercase= arr[j]
break
result.append(lowercase__ )
return result
def _lowerCamelCase( lowercase__ ) -> list[float]:
'''simple docstring'''
__lowercase= []
for i, outer in enumerate(lowercase__ ):
__lowercase= -1
for inner in arr[i + 1 :]:
if outer < inner:
__lowercase= inner
break
result.append(lowercase__ )
return result
def _lowerCamelCase( lowercase__ ) -> list[float]:
'''simple docstring'''
__lowercase= len(lowercase__ )
__lowercase= []
__lowercase= [-1] * arr_size
for index in reversed(range(lowercase__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__lowercase= stack[-1]
stack.append(arr[index] )
return result
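# Illustrative check: for arr = [2, 1, 3] all three implementations above should agree and
# return [3, 3, -1], since 3 is the next greater element for both 2 and 1, and nothing
# greater follows the final 3.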
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
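# Illustrative sanity check: integrating f(x) = x**2 from 0 to 1 with 1_000 steps should
# give a value very close to the exact integral 1/3, since the trapezoidal rule's error
# shrinks as the number of steps grows.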
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): Check whether language generation is also applicable to other models
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase ), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase ), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 356
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def extract_user_profile(script) -> dict:
    '''Pull the user profile dict out of an Instagram page <script> tag.'''
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
from copy import deepcopy
class A :
    def __init__(self , arr = None , size = None ):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init(self , arr ):
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self ):
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index ):
        return index + (index & (-index))
    @staticmethod
    def prev(index ):
        return index - (index & (-index))
    def add(self , index , value ):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update(self , index , value ):
        self.add(index , value - self.get(index ) )
    def prefix(self , right ):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query(self , left , right ):
        return self.prefix(right ) - self.prefix(left )
    def get(self , index ):
        return self.query(index , index + 1 )
    def rank_query(self , value ):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
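# Hand-checked usage sketch for the Fenwick tree class above; the helper name
# and sample values are illustrative only.
def _fenwick_usage_example():
    tree = A(arr=[1, 2, 3, 4, 5] )
    assert tree.prefix(3 ) == 6  # 1 + 2 + 3
    assert tree.query(1 , 4 ) == 9  # 2 + 3 + 4
    tree.add(2 , 1_0 )  # the represented array becomes [1, 2, 13, 4, 5]
    assert tree.query(1 , 4 ) == 1_9
    assert tree.get(2 ) == 1_3
    assert tree.get_array() == [1, 2, 1_3, 4, 5]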
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''Return True if the matrix equals its own conjugate transpose.'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    '''Rayleigh quotient (v* a v) / (v* v) for a Hermitian matrix a and vector v.'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    '''Check the Rayleigh quotient on two small Hermitian matrices.'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F'{a} is not hermitian.'
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F'{a} is not hermitian.'
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 304
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=1 , lowerCAmelCase=False , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= vocab_size
__lowercase= d_embed
__lowercase= d_proj
__lowercase= cutoffs + [vocab_size]
__lowercase= [0] + self.cutoffs
__lowercase= div_val
__lowercase= self.cutoffs[0]
__lowercase= len(self.cutoffs ) - 1
__lowercase= self.shortlist_size + self.n_clusters
__lowercase= keep_order
__lowercase= []
__lowercase= []
def _A (self , lowerCAmelCase ):
if self.n_clusters > 0:
__lowercase= self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase , name='cluster_weight' )
__lowercase= self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__lowercase= self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase , name=f'out_projs_._{i}' , )
self.out_projs.append(lowerCAmelCase )
else:
self.out_projs.append(lowerCAmelCase )
__lowercase= self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase , name=f'out_layers_._{i}_._weight' , )
__lowercase= self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__lowercase, __lowercase= self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase= self.d_embed // (self.div_val**i)
__lowercase= self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase , name=f'out_projs_._{i}' )
self.out_projs.append(lowerCAmelCase )
__lowercase= self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase , name=f'out_layers_._{i}_._weight' , )
__lowercase= self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase )
@staticmethod
def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ):
__lowercase= x
if proj is not None:
__lowercase= tf.einsum('ibd,ed->ibe' , lowerCAmelCase , lowerCAmelCase )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase , lowerCAmelCase ) + b
@staticmethod
def _A (lowerCAmelCase , lowerCAmelCase ):
__lowercase= shape_list(lowerCAmelCase )
__lowercase= tf.range(lp_size[0] , dtype=target.dtype )
__lowercase= tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True , lowerCAmelCase=False ):
__lowercase= 0
if self.n_clusters == 0:
__lowercase= self._logit(lowerCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__lowercase= tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase , logits=lowerCAmelCase )
__lowercase= tf.nn.log_softmax(lowerCAmelCase , axis=-1 )
else:
__lowercase= shape_list(lowerCAmelCase )
__lowercase= []
__lowercase= tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__lowercase, __lowercase= self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__lowercase= (target >= l_idx) & (target < r_idx)
__lowercase= tf.where(lowerCAmelCase )
__lowercase= tf.boolean_mask(lowerCAmelCase , lowerCAmelCase ) - l_idx
if self.div_val == 1:
__lowercase= self.out_layers[0][0][l_idx:r_idx]
__lowercase= self.out_layers[0][1][l_idx:r_idx]
else:
__lowercase= self.out_layers[i][0]
__lowercase= self.out_layers[i][1]
if i == 0:
__lowercase= tf.concat([cur_W, self.cluster_weight] , 0 )
__lowercase= tf.concat([cur_b, self.cluster_bias] , 0 )
__lowercase= self._logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.out_projs[0] )
__lowercase= tf.nn.log_softmax(lowerCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__lowercase= tf.boolean_mask(lowerCAmelCase , lowerCAmelCase )
__lowercase= self._gather_logprob(lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= self._logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.out_projs[i] )
__lowercase= tf.nn.log_softmax(lowerCAmelCase )
__lowercase= self.cutoffs[0] + i - 1 # No probability for the head cluster
__lowercase= head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase )
if target is not None:
__lowercase= tf.boolean_mask(lowerCAmelCase , lowerCAmelCase )
__lowercase= tf.boolean_mask(lowerCAmelCase , lowerCAmelCase )
__lowercase= self._gather_logprob(lowerCAmelCase , lowerCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase , -cur_logprob , shape_list(lowerCAmelCase ) )
__lowercase= tf.concat(lowerCAmelCase , axis=-1 )
if target is not None:
if return_mean:
__lowercase= tf.reduce_mean(lowerCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCAmelCase , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 304
| 0
|
import collections
import os
import re
from pathlib import Path
lowerCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
lowerCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
lowerCAmelCase = re.compile(R'''^\s*else:''')
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
if _re_test_backend.search(lowercase__ ) is None:
return None
__lowercase= [b[0] for b in _re_backend.findall(lowercase__ )]
backends.sort()
return "_and_".join(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase= f.readlines()
__lowercase= 0
while line_index < len(lowercase__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase__ ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase= []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase= lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase__ ):
__lowercase= _re_one_line_import_struct.search(lowercase__ ).groups()[0]
__lowercase= re.findall(R'\[([^\]]+)\]' , lowercase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase= _re_import_struct_key_value.search(lowercase__ )
if single_line_import_search is not None:
__lowercase= [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase__ ) > 0]
objects.extend(lowercase__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase= {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase= find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase= None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase= []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase= lines[line_index]
if _re_import_struct_add_one.search(lowercase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase__ ) is not None:
__lowercase= _re_import_struct_add_many.search(lowercase__ ).groups()[0].split(', ' )
__lowercase= [obj[1:-1] for obj in imports if len(lowercase__ ) > 0]
objects.extend(lowercase__ )
elif _re_between_brackets.search(lowercase__ ) is not None:
__lowercase= _re_between_brackets.search(lowercase__ ).groups()[0].split(', ' )
__lowercase= [obj[1:-1] for obj in imports if len(lowercase__ ) > 0]
objects.extend(lowercase__ )
elif _re_quote_object.search(lowercase__ ) is not None:
objects.append(_re_quote_object.search(lowercase__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 1_2 + '"' ):
objects.append(line[1_3:-3] )
line_index += 1
__lowercase= objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase= []
while (
line_index < len(lowercase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase= lines[line_index]
__lowercase= _re_import.search(lowercase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase= {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase= find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase= None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase= []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase= lines[line_index]
__lowercase= _re_import.search(lowercase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
__lowercase= objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]:
'''simple docstring'''
def find_duplicates(lowercase__ ):
return [k for k, v in collections.Counter(lowercase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase= []
for key in import_dict_objects.keys():
__lowercase= find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
__lowercase= find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase= 'base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def _lowerCamelCase( ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= []
for root, _, files in os.walk(lowercase__ ):
if "__init__.py" in files:
__lowercase= os.path.join(lowercase__ , '__init__.py' )
__lowercase= parse_init(lowercase__ )
if objects is not None:
__lowercase= analyze_results(*lowercase__ )
if len(lowercase__ ) > 0:
__lowercase= F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(lowercase__ ) )
if len(lowercase__ ) > 0:
raise ValueError('\n\n'.join(lowercase__ ) )
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= []
for path, directories, files in os.walk(lowercase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(lowercase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase__ ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase= str((Path(lowercase__ ) / folder).relative_to(lowercase__ ) )
__lowercase= short_path.replace(os.path.sep , '.' )
submodules.append(lowercase__ )
for fname in files:
if fname == "__init__.py":
continue
__lowercase= str((Path(lowercase__ ) / fname).relative_to(lowercase__ ) )
__lowercase= short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(lowercase__ )
return submodules
lowerCAmelCase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
from transformers.utils import direct_transformers_import
__lowercase= direct_transformers_import(lowercase__ )
__lowercase= set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(lowercase__ , '__init__.py' ) , 'r' ) as f:
__lowercase= f.read()
import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , lowercase__ ) ) )
__lowercase= [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowercase__ ) > 0:
__lowercase= '\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 304
| 0
|
def _lowerCamelCase( arr , required_sum ) -> bool:
    '''Return True if some subset of `arr` sums exactly to `required_sum` (tabular DP).'''
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
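# Hand-checked usage sketch for the subset-sum check defined above; the helper
# name and sample values are illustrative only.
def _subset_sum_example():
    assert _lowerCamelCase([3, 3_4, 4, 1_2, 5, 2] , 9 )  # 4 + 5 == 9
    assert not _lowerCamelCase([3, 3_4, 4, 1_2, 5, 2] , 3_0 )  # no subset sums to 30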
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
| 304
| 0
|
def merge_sort( collection ) -> list:
    '''Pure-Python top-down merge sort; returns the sorted collection.'''
    def merge(left , right ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
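# Hand-checked usage sketch for merge_sort above; the helper name and sample
# values are illustrative only.
def _merge_sort_example():
    assert merge_sort([5, 2, 9, 1] ) == [1, 2, 5, 9]
    assert merge_sort([] ) == []
    assert merge_sort([-2, -5, -4_5] ) == [-4_5, -5, -2]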
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=A_ )
class A ( A_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase_ : str =field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase_ : ClassVar[Features] =Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
UpperCamelCase_ : ClassVar[Features] =Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
UpperCamelCase_ : str ="question"
UpperCamelCase_ : str ="context"
UpperCamelCase_ : str ="answers"
@property
def _A (self ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363
|
import math
from datetime import datetime, timedelta
def _lowerCamelCase( lowercase__ ) -> datetime:
'''simple docstring'''
__lowercase= year % 1_9
__lowercase= year % 4
__lowercase= year % 7
__lowercase= math.floor(year / 1_0_0 )
__lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__lowercase= leap_day_inhibits / 4
__lowercase= (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__lowercase= (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_8 )
else:
return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
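# An independently written, compact sketch of the same Gauss Easter computation with
# descriptive names, checked against a known date (Easter 2023 fell on April 9).
import math
from datetime import date, timedelta
def gauss_easter_sketch(year: int) -> date:
    a, b, c = year % 19, year % 4, year % 7
    k = year // 100
    p = math.floor((13 + 8 * k) / 25)
    q = k // 4
    m = (15 - p + k - q) % 30
    n = (4 + k - q) % 7
    d = (19 * a + m) % 30                # days from March 21 to the Paschal full moon
    e = (2 * b + 4 * c + 6 * d + n) % 7  # days from the full moon to the next Sunday
    if d == 29 and e == 6:
        return date(year, 4, 19)
    if d == 28 and e == 6:
        return date(year, 4, 18)
    return date(year, 3, 22) + timedelta(days=d + e)
assert gauss_easter_sketch(2023) == date(2023, 4, 9)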
| 304
| 0
|
from math import factorial, radians
def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
'''simple docstring'''
__lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowercase= radians(lowercase__ )
__lowercase= angle_in_radians
__lowercase= 3
__lowercase= -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
__lowercase= -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
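# A self-contained cross-check (independent re-implementation, not the original function)
# of the truncated Maclaurin series used above, compared against a known exact value.
import math
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18) -> float:
    x = math.radians(angle_in_degrees % 360.0)
    result, power, sign = x, 3, -1.0
    for _ in range(accuracy):
        result += sign * x**power / math.factorial(power)
        sign, power = -sign, power + 2
    return result
assert abs(maclaurin_sin(30.0) - 0.5) < 1e-9  # sin(30 degrees) is exactly 0.5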
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
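# Illustrative only (assumed sizes): the dummy past_key_values generated above are pairs
# of zero tensors shaped (batch, num_attention_heads, past_sequence_length, head_dim).
import torch
batch, num_heads, past_len, d_model, num_layers = 2, 16, 10, 512, 8
shape = (batch, num_heads, past_len, d_model // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 16, 10, 32)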
| 304
| 0
|
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase = '''docs/source/en/_toctree.yml'''
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= defaultdict(lowercase__ )
__lowercase= []
__lowercase= []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(lowercase__ )
__lowercase= new_doc_list
__lowercase= [key for key, value in counts.items() if value > 1]
__lowercase= []
for duplicate_key in duplicates:
__lowercase= list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(lowercase__ ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    __lowercase= sorted(lowercase__ , key=lambda lowercase__ : lowercase__["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase__ ) > 1:
        raise ValueError(F'{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(lowercase__ )
# Sort
return overview_doc
def _lowerCamelCase( lowercase__=False ) -> List[str]:
'''simple docstring'''
with open(lowercase__ , encoding='utf-8' ) as f:
__lowercase= yaml.safe_load(f.read() )
# Get to the API doc
__lowercase= 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowercase= content[api_idx]['sections']
    # Then to the scheduler doc
__lowercase= 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__lowercase= api_doc[scheduler_idx]['sections']
__lowercase= clean_doc_toc(lowercase__ )
__lowercase= False
if new_scheduler_doc != scheduler_doc:
__lowercase= True
if overwrite:
__lowercase= new_scheduler_doc
if diff:
if overwrite:
__lowercase= api_doc
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
                'The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
def _lowerCamelCase( lowercase__=False ) -> Union[str, Any]:
'''simple docstring'''
with open(lowercase__ , encoding='utf-8' ) as f:
__lowercase= yaml.safe_load(f.read() )
# Get to the API doc
__lowercase= 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowercase= content[api_idx]['sections']
    # Then to the pipeline doc
__lowercase= 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__lowercase= False
__lowercase= api_doc[pipeline_idx]['sections']
__lowercase= []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__lowercase= pipeline_doc['section']
__lowercase= clean_doc_toc(lowercase__ )
if overwrite:
__lowercase= new_sub_pipeline_doc
new_pipeline_docs.append(lowercase__ )
# sort overall pipeline doc
__lowercase= clean_doc_toc(lowercase__ )
if new_pipeline_docs != pipeline_docs:
__lowercase= True
if overwrite:
__lowercase= new_pipeline_docs
if diff:
if overwrite:
__lowercase= api_doc
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
                'The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
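# A condensed, standalone sketch (made-up table-of-contents entries) of the
# de-duplicate-then-sort idea implemented by the toc-cleaning helper above.
def dedupe_and_sort(docs: list) -> list:
    seen = {}
    for doc in docs:
        seen.setdefault(doc["local"], doc)   # keep the first entry per 'local' key
    return sorted(seen.values(), key=lambda d: d["title"].lower())
toc = [
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddpm", "title": "DDPM"},
    {"local": "ddim", "title": "DDIM"},
]
assert [d["local"] for d in dedupe_and_sort(toc)] == ["ddim", "ddpm"]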
| 365
|
from math import factorial, radians
def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
'''simple docstring'''
__lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowercase= radians(lowercase__ )
__lowercase= angle_in_radians
__lowercase= 3
__lowercase= -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
__lowercase= -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 304
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowercase= model_type_to_module_name(lowercase__ )
__lowercase= importlib.import_module(F'.{module_name}' , 'transformers.models' )
try:
return getattr(lowercase__ , lowercase__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowercase__ , '__name__' , lowercase__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowercase= importlib.import_module('transformers' )
if hasattr(lowercase__ , lowercase__ ):
return getattr(lowercase__ , lowercase__ )
return None
def _lowerCamelCase( lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ) -> List[str]:
'''simple docstring'''
__lowercase= get_file_from_repo(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(lowercase__ , encoding='utf-8' ) as reader:
return json.load(lowercase__ )
class A :
def __init__(self ):
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase )
def _A (cls , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= kwargs.pop('config' , lowerCAmelCase )
__lowercase= kwargs.pop('trust_remote_code' , lowerCAmelCase )
__lowercase= True
__lowercase, __lowercase= FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase , **lowerCAmelCase )
__lowercase= config_dict.get('feature_extractor_type' , lowerCAmelCase )
__lowercase= None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__lowercase= config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= AutoConfig.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
            # It could be in `config.feature_extractor_type`
__lowercase= getattr(lowerCAmelCase , 'feature_extractor_type' , lowerCAmelCase )
if hasattr(lowerCAmelCase , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
__lowercase= config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
__lowercase= feature_extractor_class_from_name(lowerCAmelCase )
__lowercase= feature_extractor_auto_map is not None
__lowercase= feature_extractor_class is not None or type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
__lowercase= resolve_trust_remote_code(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if has_remote_code and trust_remote_code:
__lowercase= get_class_from_dynamic_module(
lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
__lowercase= kwargs.pop('code_revision' , lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCAmelCase , **lowerCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCAmelCase , **lowerCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
__lowercase= FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase )]
return feature_extractor_class.from_dict(lowerCAmelCase , **lowerCAmelCase )
raise ValueError(
f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def _A (lowerCAmelCase , lowerCAmelCase ):
FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase , lowerCAmelCase )
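# A reduced sketch (hypothetical two-entry mapping) of the name-to-module lookup idea in
# the class-resolution helper above: find which model module declares a given class name.
MAPPING = {"vit": ["ViTFeatureExtractor"], "clip": ["CLIPFeatureExtractor"]}
def module_for_class(class_name: str):
    for module_name, extractors in MAPPING.items():
        if class_name in extractors:
            return module_name
    return None
assert module_for_class("CLIPFeatureExtractor") == "clip"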
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 367
|
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
from typing import List
import numpy as np
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= {key: len(lowercase__ ) for key, value in gen_kwargs.items() if isinstance(lowercase__ , lowercase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
__lowercase= max(lists_lengths.values() , default=0 )
return max(1 , lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[range]:
'''simple docstring'''
__lowercase= []
for group_idx in range(lowercase__ ):
__lowercase= num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowercase= shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowercase= range(lowercase__ , start + num_shards_to_add )
shards_indices_per_group.append(lowercase__ )
return shards_indices_per_group
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[dict]:
'''simple docstring'''
__lowercase= _number_of_shards_in_gen_kwargs(lowercase__ )
if num_shards == 1:
return [dict(lowercase__ )]
else:
__lowercase= _distribute_shards(num_shards=lowercase__ , max_num_jobs=lowercase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(lowercase__ , lowercase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(lowercase__ ) )
]
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , lowercase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _lowerCamelCase( lowercase__ , lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= {len(lowercase__ ) for value in gen_kwargs.values() if isinstance(lowercase__ , lowercase__ )}
__lowercase= {}
for size in list_sizes:
__lowercase= list(range(lowercase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase= dict(lowercase__ )
for key, value in shuffled_kwargs.items():
if isinstance(lowercase__ , lowercase__ ):
__lowercase= [value[i] for i in indices_per_size[len(lowercase__ )]]
return shuffled_kwargs
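# A standalone sketch (same splitting rule, simplified) of how shard indices are spread
# over at most `max_num_jobs` contiguous groups by the distribution helper above.
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        count = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if count == 0:
            break
        groups.append(range(start, start + count))
        start += count
    return groups
assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]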
| 368
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
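# A quick, independent sanity check (small exponent) of the digit-sum idea above:
# 2 ** 15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
def digit_sum_of_power_of_two(power: int) -> int:
    return sum(int(digit) for digit in str(2**power))
assert digit_sum_of_power_of_two(15) == 26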
| 304
| 0
|
from collections import defaultdict
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__lowercase= [
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCAmelCase ) )
]
__lowercase= defaultdict(lowerCAmelCase ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__lowercase= (1 << len(lowerCAmelCase )) - 1
def _A (self , lowerCAmelCase , lowerCAmelCase ):
        # if mask == self.final_mask, every person has been assigned a task, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
__lowercase= self.count_ways_until(lowerCAmelCase , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
__lowercase= total_ways_util
return self.dp[mask][task_no]
def _A (self , lowerCAmelCase ):
# Store the list of persons for each task
for i in range(len(lowerCAmelCase ) ):
for j in task_performed[i]:
self.task[j].append(lowerCAmelCase )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
lowerCAmelCase = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowerCAmelCase = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
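# An independent brute-force cross-check (same example data as above, should agree with
# the DP result): every person picks one allowed task and all chosen tasks are distinct.
from itertools import product
def count_assignments_bruteforce(allowed) -> int:
    return sum(len(set(pick)) == len(pick) for pick in product(*allowed))
assert count_assignments_bruteforce([[1, 3, 4], [1, 2, 5], [3, 4]]) == 10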
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
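# Minimal illustration (made-up label set) of the label <-> id mapping that the training
# script above derives from the CSV's label column.
labels = sorted({"negative", "neutral", "positive"})
label2id = {label: i for i, label in enumerate(labels)}
id2label = {i: label for label, i in label2id.items()}
assert id2label[label2id["positive"]] == "positive"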
| 304
| 0
|
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= ''
for i in table:
res += inp[i - 1]
return res
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return data[1:] + data[0]
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= ''
for i in range(len(lowercase__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
__lowercase= int('0b' + data[0] + data[-1] , 2 )
__lowercase= int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= message[:4]
__lowercase= message[4:]
__lowercase= apply_table(lowercase__ , lowercase__ )
__lowercase= xor(lowercase__ , lowercase__ )
__lowercase= apply_sbox(lowercase__ , temp[:4] ) # noqa: E741
__lowercase= apply_sbox(lowercase__ , temp[4:] )
__lowercase= '0' * (2 - len(lowercase__ )) + l # noqa: E741
__lowercase= '0' * (2 - len(lowercase__ )) + r
__lowercase= apply_table(l + r , lowercase__ )
__lowercase= xor(lowercase__ , lowercase__ )
return temp + right
if __name__ == "__main__":
lowerCAmelCase = input('''Enter 10 bit key: ''')
lowerCAmelCase = input('''Enter 8 bit message: ''')
lowerCAmelCase = [6, 3, 7, 4, 8, 5, 1_0, 9]
lowerCAmelCase = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
lowerCAmelCase = [2, 4, 3, 1]
lowerCAmelCase = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase = apply_table(key, paa_table)
lowerCAmelCase = temp[:5]
lowerCAmelCase = temp[5:]
lowerCAmelCase = left_shift(left)
lowerCAmelCase = left_shift(right)
lowerCAmelCase = apply_table(left + right, pa_table)
lowerCAmelCase = left_shift(left)
lowerCAmelCase = left_shift(right)
lowerCAmelCase = left_shift(left)
lowerCAmelCase = left_shift(right)
lowerCAmelCase = apply_table(left + right, pa_table)
# encryption
lowerCAmelCase = apply_table(message, IP)
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = temp[4:] + temp[:4]
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowerCAmelCase = apply_table(CT, IP)
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = temp[4:] + temp[:4]
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
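# A tiny standalone illustration (toy input) of the bit-permutation helper that the
# simplified-DES demo above relies on: characters are re-ordered by 1-based positions.
def apply_table_sketch(inp: str, table) -> str:
    return "".join(inp[i - 1] for i in table)
assert apply_table_sketch("abcd", [2, 4, 3, 1]) == "bdca"   # the P4 table from above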
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
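# A quick standalone check of the convolutional patch-embedding size formula the model
# tester above uses: out = floor((size + 2 * padding - kernel) / stride) + 1.
from math import floor
def conv_out_size(size: int, padding: int, kernel: int, stride: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1
# First stage in the tester: 64x64 input, 7x7 kernel, stride 4, padding 2 -> 16x16 maps.
assert conv_out_size(64, 2, 7, 4) == 16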
| 304
| 0
|
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
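# Minimal sketch of how the (pattern, replacement) pairs above are meant to be
# applied: re.sub runs in list order, so the more specific tfds patterns must
# come before the generic `tfds\.` rewrite. The two rules below are a reduced
# illustration, not the full table.
_DEMO_RULES = [
    (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
    (R"tfds\.", R"datasets."),
]

def _demo_convert_line(line):
    for _pattern, _replacement in _DEMO_RULES:
        line = re.sub(_pattern, _replacement, line)
    return line

assert _demo_convert_line("tfds.features.Text()") == "datasets.Value('string')"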
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304
| 0
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Optional[Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowercase__ )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCamelCase_ : Optional[List[str]] =list_field(
default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
int(lowercase__ )
return True
except ValueError:
return False
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
try:
float(lowercase__ )
return True
except ValueError:
return False
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= args
__lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__lowercase= csv.DictReader(lowerCAmelCase )
for row in reader:
__lowercase= row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__lowercase= int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__lowercase= float(row['result'] )
def _A (self ):
__lowercase, __lowercase= plt.subplots()
__lowercase= 'Time usage' if self.args.is_time else 'Memory usage'
__lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
            # set logarithmic scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) )
__lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) )
__lowercase= self.result_dict[model_name]['result']
((__lowercase), (__lowercase))= (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowercase= (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase= np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , )
else:
__lowercase= np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__lowercase), (__lowercase))= (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )]
plt.scatter(
lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(lowerCAmelCase , lowerCAmelCase , '--' )
title_str += f' {label_model_name} vs.'
__lowercase= title_str[:-4]
__lowercase= 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCAmelCase )
plt.xlabel(lowerCAmelCase )
plt.ylabel(lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _lowerCamelCase( ) -> Tuple:
'''simple docstring'''
__lowercase= HfArgumentParser(lowercase__ )
__lowercase= parser.parse_args_into_dataclasses()[0]
__lowercase= Plot(args=lowercase__ )
plot.plot()
if __name__ == "__main__":
main()
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 304
| 0
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class A ( datasets.BuilderConfig ):
UpperCamelCase_ : Optional[datasets.Features] =None
def _lowerCamelCase( lowercase__ , lowercase__ , ) -> List[str]:
'''simple docstring'''
import pyspark
def generate_fn():
__lowercase= df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__lowercase= df_with_partition_id.select('*' ).where(F'part_id = {partition_id}' ).drop('part_id' )
__lowercase= partition_df.collect()
__lowercase= 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
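# Pure-Python sketch (no Spark required) of the keying scheme produced by the
# generator factory above: rows are yielded partition by partition, following
# the requested partition order, with keys of the form "<partition_id>_<row_id>".
# The data below is a stand-in for a DataFrame, not real Spark output.
def _demo_generate_examples(partitions, partition_order):
    for partition_id in partition_order:
        for row_id, row in enumerate(partitions[partition_id]):
            yield f"{partition_id}_{row_id}", row

_demo_parts = {0: [{"text": "a"}], 1: [{"text": "b"}, {"text": "c"}]}
assert list(_demo_generate_examples(_demo_parts, [1, 0]))[0] == ("1_0", {"text": "b"})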
class A ( _BaseExamplesIterable ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=None , ):
__lowercase= df
__lowercase= partition_order or range(self.df.rdd.getNumPartitions() )
__lowercase= _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self ):
yield from self.generate_examples_fn()
def _A (self , lowerCAmelCase ):
__lowercase= list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.split_shard_indices_by_worker(lowerCAmelCase , lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase )
@property
def _A (self ):
return len(self.partition_order )
class A ( datasets.DatasetBuilder ):
UpperCamelCase_ : Optional[Any] =SparkConfig
def __init__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
import pyspark
__lowercase= pyspark.sql.SparkSession.builder.getOrCreate()
__lowercase= df
__lowercase= working_dir
super().__init__(
cache_dir=lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **lowerCAmelCase , )
def _A (self ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowerCAmelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowerCAmelCase )
            __lowercase= os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCAmelCase , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__lowercase= (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def _A (self ):
return datasets.DatasetInfo(features=self.config.features )
def _A (self , lowerCAmelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _A (self , lowerCAmelCase ):
import pyspark
def get_arrow_batch_size(lowerCAmelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__lowercase= self.df.count()
__lowercase= df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowercase= (
self.df.limit(lowerCAmelCase )
.repartition(1 )
.mapInArrow(lowerCAmelCase , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowercase= approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowercase= min(lowerCAmelCase , int(approx_total_size / max_shard_size ) )
__lowercase= self.df.repartition(lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
import pyspark
__lowercase= ParquetWriter if file_format == 'parquet' else ArrowWriter
__lowercase= os.path.join(self._working_dir , os.path.basename(lowerCAmelCase ) ) if self._working_dir else fpath
__lowercase= file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowercase= self.config.features
__lowercase= self._writer_batch_size
__lowercase= self._fs.storage_options
def write_arrow(lowerCAmelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__lowercase= pyspark.TaskContext().taskAttemptId()
__lowercase= next(lowerCAmelCase , lowerCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__lowercase= 0
__lowercase= writer_class(
features=lowerCAmelCase , path=working_fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , writer_batch_size=lowerCAmelCase , storage_options=lowerCAmelCase , embed_local_files=lowerCAmelCase , )
__lowercase= pa.Table.from_batches([first_batch] )
writer.write_table(lowerCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowercase, __lowercase= writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__lowercase= writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , writer_batch_size=lowerCAmelCase , storage_options=lowerCAmelCase , embed_local_files=lowerCAmelCase , )
__lowercase= pa.Table.from_batches([batch] )
writer.write_table(lowerCAmelCase )
if writer._num_bytes > 0:
__lowercase, __lowercase= writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCAmelCase ) ):
__lowercase= os.path.join(os.path.dirname(lowerCAmelCase ) , os.path.basename(lowerCAmelCase ) )
shutil.move(lowerCAmelCase , lowerCAmelCase )
__lowercase= (
self.df.mapInArrow(lowerCAmelCase , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _A (self , lowerCAmelCase , lowerCAmelCase = "arrow" , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
self._validate_cache_dir()
__lowercase= convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCAmelCase )
__lowercase= not is_remote_filesystem(self._fs )
__lowercase= os.path.join if is_local else posixpath.join
__lowercase= '-TTTTT-SSSSS-of-NNNNN'
__lowercase= f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
__lowercase= path_join(self._output_dir , lowerCAmelCase )
__lowercase= 0
__lowercase= 0
__lowercase= 0
__lowercase= []
__lowercase= []
for task_id, content in self._prepare_split_single(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCAmelCase )
__lowercase= total_num_examples
__lowercase= total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
__lowercase= all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowercase= self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
rename(
lowerCAmelCase , fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , f'{global_shard_id:05d}' ).replace('NNNNN' , f'{total_shards:05d}' ) , )
__lowercase= []
__lowercase= 0
for i in range(len(lowerCAmelCase ) ):
__lowercase, __lowercase= task_id_and_num_shards[i]
for shard_id in range(lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCAmelCase , len(lowerCAmelCase ) ).map(lambda lowerCAmelCase : _rename_shard(*lowerCAmelCase ) ).collect()
else:
# don't use any pattern
__lowercase= 0
__lowercase= task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , fpath.replace(lowerCAmelCase , '' ) , )
def _A (self , lowerCAmelCase , ):
return SparkExamplesIterable(self.df )
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
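# Self-contained sketch (separate from the helper above, whose rule set is more
# complete) of the key-renaming idea: ordered re.sub rules map the original
# BLIP/timm-style parameter names onto the HF-style module path.
def _demo_rename_key(key):
    key = re.sub(R"visual_encoder", "vision_model.encoder", key)
    key = re.sub(R"blocks", "layers", key)
    key = re.sub(R"attn", "self_attn", key)
    key = re.sub(R"self_attn\.proj\b", "self_attn.projection", key)
    return key

assert (
    _demo_rename_key("visual_encoder.blocks.0.attn.proj.weight")
    == "vision_model.encoder.layers.0.self_attn.projection.weight"
)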
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
import operator
def _lowerCamelCase( lowercase__ , lowercase__ = False , lowercase__ = None ) -> list:
'''simple docstring'''
__lowercase= operator.lt if reverse else operator.gt
__lowercase= solution or []
if not arr:
return solution
__lowercase= [arr.pop(0 )]
for i, item in enumerate(lowercase__ ):
if _operator(lowercase__ , sublist[-1] ):
sublist.append(lowercase__ )
arr.pop(lowercase__ )
# merging sublist into solution list
if not solution:
solution.extend(lowercase__ )
else:
while sublist:
__lowercase= sublist.pop(0 )
for i, xx in enumerate(lowercase__ ):
if not _operator(lowercase__ , lowercase__ ):
solution.insert(lowercase__ , lowercase__ )
break
else:
solution.append(lowercase__ )
strand_sort(lowercase__ , lowercase__ , lowercase__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
import os
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
__lowercase= str(file.readlines()[0] )
__lowercase= names.replace('"' , '' ).split(',' )
names.sort()
__lowercase= 0
__lowercase= 0
for i, name in enumerate(lowercase__ ):
for letter in name:
name_score += ord(lowercase__ ) - 6_4
total_score += (i + 1) * name_score
__lowercase= 0
return total_score
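# Worked example of the scoring rule above (value taken from the Project Euler
# problem statement): "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53,
# so at 0-indexed position 937 it contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 6_4 for letter in "COLIN") == 5_3
assert (9_3_7 + 1) * sum(ord(letter) - 6_4 for letter in "COLIN") == 4_9_7_1_4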
if __name__ == "__main__":
print(solution())
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
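# Hand-checked instance of the per-segment rule above (one step, f(x) = x**2 on
# [0, 2]): the single trapezoid has area |f(0) + f(2)| * (2 - 0) / 2 = 4.0,
# which is what the loop produces with steps = 1.
assert abs(0.0**2 + 2.0**2) * (2.0 - 0.0) / 2 == 4.0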
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
| 304
| 0
|
import random
class A :
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= [ord(lowerCAmelCase ) for i in text]
__lowercase= []
__lowercase= []
for i in plain:
__lowercase= random.randint(1 , 3_0_0 )
__lowercase= (i + k) * k
cipher.append(lowerCAmelCase )
key.append(lowerCAmelCase )
return cipher, key
@staticmethod
def _A (lowerCAmelCase , lowerCAmelCase ):
__lowercase= []
for i in range(len(lowerCAmelCase ) ):
__lowercase= int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(lowerCAmelCase ) )
return "".join(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase ,lowerCAmelCase = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A ( A_ ):
def __init__(self , *lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
__lowercase= eval_examples
__lowercase= post_process_function
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = "eval" ):
__lowercase= self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase= self.get_eval_dataloader(lowerCAmelCase )
__lowercase= self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase= self.compute_metrics
__lowercase= None
__lowercase= self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__lowercase= time.time()
try:
__lowercase= eval_loop(
lowerCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase , metric_key_prefix=lowerCAmelCase , )
finally:
__lowercase= compute_metrics
__lowercase= self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowerCAmelCase , lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
__lowercase= self.post_process_function(lowerCAmelCase , lowerCAmelCase , output.predictions )
__lowercase= self.compute_metrics(lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
__lowercase= metrics.pop(lowerCAmelCase )
metrics.update(output.metrics )
else:
__lowercase= output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowercase= self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase )
return metrics
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase = "test" ):
__lowercase= self.get_test_dataloader(lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase= self.compute_metrics
__lowercase= None
__lowercase= self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__lowercase= time.time()
try:
__lowercase= eval_loop(
lowerCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase , metric_key_prefix=lowerCAmelCase , )
finally:
__lowercase= compute_metrics
__lowercase= self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowerCAmelCase , lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase= self.post_process_function(lowerCAmelCase , lowerCAmelCase , output.predictions , 'predict' )
__lowercase= self.compute_metrics(lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
__lowercase= metrics.pop(lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase )
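# Standalone sketch (sample metric names are assumptions) of the prefixing step
# used in both evaluate and predict above: every metric key that does not
# already start with the prefix is re-inserted with "<prefix>_" prepended.
_demo_prefix = "test"
_demo_metrics = {"exact_match": 81.2, "f1": 88.6, "test_runtime": 3.1}
for _demo_key in list(_demo_metrics.keys()):
    if not _demo_key.startswith(f"{_demo_prefix}_"):
        _demo_metrics[f"{_demo_prefix}_{_demo_key}"] = _demo_metrics.pop(_demo_key)
assert set(_demo_metrics) == {"test_exact_match", "test_f1", "test_runtime"}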
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
set_seed(7_7_0)
lowerCAmelCase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowerCAmelCase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowerCAmelCase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _lowerCamelCase( lowercase__ , lowercase__=False ) -> List[Any]:
'''simple docstring'''
__lowercase= model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]['file_name'] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ) -> Any:
'''simple docstring'''
if model_type == "text":
__lowercase= BarkSemanticModel
__lowercase= BarkSemanticConfig
__lowercase= BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowercase= BarkCoarseModel
__lowercase= BarkCoarseConfig
__lowercase= BarkCoarseGenerationConfig
elif model_type == "fine":
__lowercase= BarkFineModel
__lowercase= BarkFineConfig
__lowercase= BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowercase= F'{model_type}_small' if use_small else model_type
__lowercase= REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['repo_id'] , model_info['file_name'] )
__lowercase= torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
__lowercase= checkpoint['model_args']
if "input_vocab_size" not in model_args:
__lowercase= model_args['vocab_size']
__lowercase= model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowercase= model_args.pop('n_head' )
__lowercase= model_args.pop('n_embd' )
__lowercase= model_args.pop('n_layer' )
__lowercase= ConfigClass(**checkpoint['model_args'] )
__lowercase= ModelClass(config=lowercase__ )
__lowercase= GenerationConfigClass()
__lowercase= model_generation_config
__lowercase= checkpoint['model']
# fixup checkpoint
__lowercase= '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
__lowercase= k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
__lowercase= new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
__lowercase= state_dict.pop(lowercase__ )
__lowercase= set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowercase= {k for k in extra_keys if not k.endswith('.attn.bias' )}
__lowercase= set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowercase= {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowercase__ ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(lowercase__ ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(lowercase__ , strict=lowercase__ )
__lowercase= model.num_parameters(exclude_embeddings=lowercase__ )
__lowercase= checkpoint['best_val_loss'].item()
logger.info(F'model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss' )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
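# Note on the remapping above (a descriptive comment, not new logic): checkpoints saved
# from a torch.compile()-wrapped model carry an "_orig_mod." prefix that is stripped, and
# the GPT-style layer names (c_attn, c_proj, c_fc, wte, wpe, ln_*) are renamed to the HF
# Bark equivalents via new_layer_name_dict before load_state_dict() is called.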
def _lowerCamelCase( lowercase__ , lowercase__=False , lowercase__="text" ) -> str:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowercase= 'cpu' # do conversion on cpu
__lowercase= _get_ckpt_path(lowercase__ , use_small=lowercase__ )
__lowercase= _load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
__lowercase= _bark_load_model(lowercase__ , 'cpu' , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
__lowercase= bark_model['model']
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
__lowercase= 5
__lowercase= 1_0
if model_type in ["text", "coarse"]:
__lowercase= torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
__lowercase= bark_model(lowercase__ )[0]
__lowercase= model(lowercase__ )
# take last logits
__lowercase= output_new_model_total.logits[:, [-1], :]
else:
__lowercase= 3
__lowercase= 8
__lowercase= torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowercase= model(lowercase__ , lowercase__ )
__lowercase= bark_model(lowercase__ , lowercase__ )
__lowercase= output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Any:
'''simple docstring'''
__lowercase= os.path.join(lowercase__ , lowercase__ )
__lowercase= BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) )
__lowercase= BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) )
__lowercase= BarkFineConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) )
__lowercase= EncodecConfig.from_pretrained('facebook/encodec_24khz' )
__lowercase= BarkSemanticModel.from_pretrained(lowercase__ )
__lowercase= BarkCoarseModel.from_pretrained(lowercase__ )
__lowercase= BarkFineModel.from_pretrained(lowercase__ )
__lowercase= EncodecModel.from_pretrained('facebook/encodec_24khz' )
__lowercase= BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase= BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__lowercase= BarkModel(lowercase__ )
__lowercase= semantic
__lowercase= coarseAcoustic
__lowercase= fineAcoustic
__lowercase= codec
__lowercase= bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 357
|
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
    return np.array_equal(lowercase__ , lowercase__.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
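# Sketch of the quantity computed above (assuming M is Hermitian and v is non-zero):
#   R(M, v) = (v* @ M @ v) / (v* @ v)
# For Hermitian M the Rayleigh quotient is always real and lies between the smallest
# and largest eigenvalues of M, which is what the checks in the tests below rely on.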
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
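# The helper below turns raw audio into a normalized log-mel spectrogram: it computes a
# power spectrogram with a Hann window, maps it through the Slaney-style mel filter bank
# built above, converts to dB (80 dB range), drops the last frame, and rescales the
# values into roughly the [-1.0, 1.0] range.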
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 304
| 0
|
from __future__ import annotations
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> list:
'''simple docstring'''
__lowercase= []
__lowercase, __lowercase= input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1]= result + left + right
return input_list
def _lowerCamelCase( lowercase__ ) -> list:
'''simple docstring'''
if len(lowercase__ ) <= 1:
return input_list
__lowercase= list(lowercase__ )
# iteration for two-way merging
__lowercase= 2
while p <= len(lowercase__ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowercase__ ) , lowercase__ ):
__lowercase= i
__lowercase= i + p - 1
__lowercase= (low + high + 1) // 2
__lowercase= merge(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# final merge of last two parts
if p * 2 >= len(lowercase__ ):
__lowercase= i
__lowercase= merge(lowercase__ , 0 , lowercase__ , len(lowercase__ ) - 1 )
break
p *= 2
return input_list
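# Example trace of the bottom-up merging (a sketch): for [5, 2, 4, 1] the merge width p
# doubles each pass,
#   p = 2 -> [2, 5, 1, 4]   (adjacent pairs merged)
#   p = 4 -> [1, 2, 4, 5]   (the two sorted halves merged in the final step)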
if __name__ == "__main__":
lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
if user_input == "":
lowerCAmelCase = []
else:
lowerCAmelCase = [int(item.strip()) for item in user_input.split(''',''')]
print(iter_merge_sort(unsorted))
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 304
| 0
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _lowerCamelCase( ) -> Tuple:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase= '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , lowercase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _lowerCamelCase( ) -> List[Any]:
'''simple docstring'''
assert _test_patching.open is open
__lowercase= '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , lowercase__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , lowercase__ ):
pass
def _lowerCamelCase( ) -> List[Any]:
'''simple docstring'''
__lowercase= '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , lowercase__ ) is None
with patch_submodule(_test_patching , 'len' , lowercase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _lowerCamelCase( ) -> int:
'''simple docstring'''
__lowercase= '__test_patch_submodule_start_and_stop_mock__'
__lowercase= patch_submodule(_test_patching , 'open' , lowercase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _lowerCamelCase( ) -> Union[str, Any]:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase= '__test_patch_submodule_successive_join__'
__lowercase= '__test_patch_submodule_successive_dirname__'
__lowercase= '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , lowercase__ ):
with patch_submodule(_test_patching , 'os.rename' , lowercase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowercase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , lowercase__ ):
with patch_submodule(_test_patching , 'os.path.join' , lowercase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowercase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _lowerCamelCase( ) -> int:
'''simple docstring'''
__lowercase= '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , lowercase__ ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , lowercase__ ):
pass
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
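# Note: the discrete sigma table built above is geometrically spaced, i.e.
#   sigma_i = exp(linspace(log(sigma_min), log(sigma_max), N))_i,
# so consecutive sigmas share a constant ratio between sigma_min and sigma_max.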
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
| 304
| 0
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
lowerCAmelCase = {
'''AI-Sweden/gpt-sw3-126m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-350m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-1.6b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-6.7b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-20b''': 2_0_4_8,
}
class A ( A_ ):
UpperCamelCase_ : Tuple =VOCAB_FILES_NAMES
UpperCamelCase_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[Any] =['''input_ids''', '''attention_mask''']
def __init__(self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = None , **lowerCAmelCase , ):
__lowercase= {} if sp_model_kwargs is None else sp_model_kwargs
__lowercase= kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
__lowercase= 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__lowercase= '<|endoftext|>' if eos_token is None else eos_token
__lowercase= '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__lowercase= unk_token if pad_token is None else pad_token
__lowercase= eos_token if bos_token is None else bos_token
else:
__lowercase= '<pad>' if pad_token is None else pad_token
__lowercase= '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
__lowercase= do_lower_case
__lowercase= remove_space
__lowercase= keep_accents
__lowercase= vocab_file
__lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
# Used for whitespace normalization in input texts
# fmt : off
__lowercase= {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__lowercase= re.compile(
f'[{"".join(map(lowerCAmelCase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__(self ):
__lowercase= self.__dict__.copy()
__lowercase= None
return state
def __setstate__(self , lowerCAmelCase ):
__lowercase= d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase= {}
__lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _A (self ):
return len(self.sp_model )
def _A (self , lowerCAmelCase ):
__lowercase= self.non_printing_characters_re.sub('' , lowerCAmelCase )
# Normalize whitespaces
__lowercase= ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
__lowercase= unicodedata.normalize('NFC' , lowerCAmelCase )
return text
def _A (self , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= self.preprocess_text(lowerCAmelCase )
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def _A (self , lowerCAmelCase ):
return self.sp_model.PieceToId(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
return self.sp_model.IdToPiece(lowerCAmelCase )
@staticmethod
def _A (lowerCAmelCase ):
return out_string
def _A (self , lowerCAmelCase ):
__lowercase= []
__lowercase= ''
__lowercase= False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
__lowercase= True
__lowercase= []
else:
current_sub_tokens.append(lowerCAmelCase )
__lowercase= False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string
def _A (self ):
__lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase= os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , 'wb' ) as fi:
__lowercase= self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def _A (self , lowerCAmelCase , lowerCAmelCase = False ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.preprocess_text(lowerCAmelCase )
__lowercase= self.sp_model.encode(lowerCAmelCase )
else:
__lowercase= [self.preprocess_text(lowerCAmelCase ) for t in text]
__lowercase= self.sp_model.encode(lowerCAmelCase )
if return_tensors is True or return_tensors == "pt":
__lowercase= torch.tensor(lowerCAmelCase )
return token_ids
def _A (self , lowerCAmelCase ):
return self.sp_model.decode(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
__lowercase= (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCAmelCase ) + f'{self.bos_token}Bot:'
)
return self.encode(text=lowerCAmelCase )
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''sentencepiece.model'''}
lowerCAmelCase = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
lowerCAmelCase = {
'''google/rembert''': 2_5_6,
}
class A ( A_ ):
UpperCamelCase_ : int =VOCAB_FILES_NAMES
UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="[CLS]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , **lowerCAmelCase , ):
super().__init__(
do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= do_lower_case
__lowercase= remove_space
__lowercase= keep_accents
__lowercase= vocab_file
__lowercase= spm.SentencePieceProcessor()
self.sp_model.Load(lowerCAmelCase )
@property
def _A (self ):
return len(self.sp_model )
def _A (self ):
__lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
__lowercase= self.__dict__.copy()
__lowercase= None
return state
def __setstate__(self , lowerCAmelCase ):
__lowercase= d
__lowercase= spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def _A (self , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= self.sp_model.EncodeAsPieces(lowerCAmelCase )
return pieces
def _A (self , lowerCAmelCase ):
return self.sp_model.PieceToId(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
return self.sp_model.IdToPiece(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= self.sp_model.decode_pieces(lowerCAmelCase )
return out_string
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
__lowercase= [self.sep_token_id]
__lowercase= [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1]
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
__lowercase= [self.sep_token_id]
__lowercase= [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
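# Example of the segment ids produced above: for a pair of sequences A and B the result is
# [0] * (len(A) + 2) + [1] * (len(B) + 1), i.e. [CLS] A [SEP] belong to segment 0 and
# B [SEP] to segment 1; a single sequence gets all zeros.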
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(lowerCAmelCase ) )
return
__lowercase= os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ):
copyfile(self.vocab_file , lowerCAmelCase )
return (out_vocab_file,)
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363
|
import math
from datetime import datetime, timedelta
def _lowerCamelCase( lowercase__ ) -> datetime:
'''simple docstring'''
__lowercase= year % 1_9
__lowercase= year % 4
__lowercase= year % 7
__lowercase= math.floor(year / 1_0_0 )
__lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__lowercase= leap_day_inhibits / 4
__lowercase= (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__lowercase= (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_8 )
else:
return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
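# For example, Gauss's method as implemented above yields datetime(2023, 4, 9) for year
# 2023 and datetime(2021, 4, 4) for year 2021, matching the Western (Gregorian) Easter dates.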
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 304
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase = TypeVar('''T''')
class A ( Generic[T] ):
def __init__(self , lowerCAmelCase ):
__lowercase= data
__lowercase= None
def __str__(self ):
return f'{self.data}'
class A ( Generic[T] ):
def __init__(self ):
__lowercase= None
def __iter__(self ):
__lowercase= self.top
while node:
yield node.data
__lowercase= node.next
def __str__(self ):
return "->".join([str(lowerCAmelCase ) for item in self] )
def __len__(self ):
return len(tuple(iter(self ) ) )
def _A (self ):
return self.top is None
def _A (self , lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase )
if not self.is_empty():
__lowercase= self.top
__lowercase= node
def _A (self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , lowerCAmelCase )
__lowercase= self.top
__lowercase= self.top.next
return pop_node.data
def _A (self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def _A (self ):
__lowercase= None
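# Minimal usage sketch (written with the conventional names for the push/pop/peek
# operations the class above implements):
#   stack: Stack[int] = Stack()
#   stack.push(1); stack.push(2)
#   stack.peek()  # -> 2
#   stack.pop()   # -> 2, leaving one element on the stack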
if __name__ == "__main__":
from doctest import testmod
testmod()
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
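# Note: with use_past enabled, the decoder inputs above keep only the batch axis dynamic
# (a single new decoder token is fed per step) while decoder_attention_mask spans
# "past_decoder_sequence + sequence"; the per-layer past_key_values axes are added by
# fill_with_past_key_values_().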
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 304
| 0
|
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
if lowercase__ < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(lowercase__ , float ):
raise TypeError('Input value must be a \'int\' type' )
return bin(lowercase__ ).count('1' )
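# Hedged usage sketch (not part of the original sample; it simply calls the set-bit counter defined above):
#   _lowerCamelCase(25)  # 25 == 0b11001, so this is expected to return 3
#   _lowerCamelCase(0)   # expected to return 0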
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
from math import factorial, radians
def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
'''simple docstring'''
__lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowercase= radians(lowercase__ )
__lowercase= angle_in_radians
__lowercase= 3
__lowercase= -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
__lowercase= -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
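# Hedged usage sketch (assumed behaviour of the Maclaurin-series sine above; angles are given in degrees):
#   _lowerCamelCase(30)  # expected to be approximately 0.5
#   _lowerCamelCase(90)  # expected to be approximately 1.0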
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 304
| 0
|
def _lowerCamelCase( lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError('String lengths must match!' )
__lowercase= 0
for char_a, char_b in zip(lowercase__ , lowercase__ ):
if char_a != char_b:
count += 1
return count
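# Hedged usage sketch (Hamming distance between two equal-length strings, using the function above):
#   _lowerCamelCase('karolin', 'kathrin')  # expected to return 3
#   _lowerCamelCase('abc', 'abd')          # expected to return 1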
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
import re
import string
import numpy as np
import datasets
lowerCAmelCase = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
lowerCAmelCase = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
lowerCAmelCase = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def _A (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__lowercase= np.array([re.sub(lowerCAmelCase , '' , lowerCAmelCase ) for x in predictions] )
__lowercase= np.array([re.sub(lowerCAmelCase , '' , lowerCAmelCase ) for x in references] )
else:
__lowercase= np.asarray(lowerCAmelCase )
__lowercase= np.asarray(lowerCAmelCase )
if ignore_case:
__lowercase= np.char.lower(lowerCAmelCase )
__lowercase= np.char.lower(lowerCAmelCase )
if ignore_punctuation:
__lowercase= string.punctuation.maketrans('' , '' , string.punctuation )
__lowercase= np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
__lowercase= np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
if ignore_numbers:
__lowercase= string.digits.maketrans('' , '' , string.digits )
__lowercase= np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
__lowercase= np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
__lowercase= predictions == references
return {"exact_match": np.mean(lowerCAmelCase ) * 1_0_0}
| 367
|
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> np.ndarray:
'''simple docstring'''
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
| 368
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
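# Hedged usage sketch (digit sum of 2**power, as computed by the function above):
#   _lowerCamelCase(15)  # 2**15 == 32768, digit sum 3+2+7+6+8 == 26
#   _lowerCamelCase(10)  # 2**10 == 1024, digit sum 7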
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
| 304
| 0
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class A ( A_ ):
def __get__(self , lowerCAmelCase , lowerCAmelCase=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
__lowercase= '__cached_' + self.fget.__name__
__lowercase= getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if cached is None:
__lowercase= self.fget(lowerCAmelCase )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return cached
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'invalid truth value {val!r}' )
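# Hedged usage sketch (truth-value parsing as implemented above):
#   _lowerCamelCase('YES')    # expected to return 1
#   _lowerCamelCase('off')    # expected to return 0
#   _lowerCamelCase('maybe')  # expected to raise ValueError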
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
if is_torch_fx_proxy(lowercase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowercase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowercase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowercase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowercase__ , np.ndarray )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
return isinstance(lowercase__ , np.ndarray )
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
return _is_numpy(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
import torch
return isinstance(lowercase__ , torch.Tensor )
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
import torch
return isinstance(lowercase__ , torch.device )
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
import torch
if isinstance(lowercase__ , lowercase__ ):
if hasattr(lowercase__ , lowercase__ ):
__lowercase= getattr(lowercase__ , lowercase__ )
else:
return False
return isinstance(lowercase__ , torch.dtype )
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
return isinstance(lowercase__ , tf.Tensor )
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowercase__ , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowercase__ )
return type(lowercase__ ) == tf.Tensor
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(lowercase__ , jnp.ndarray )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
if isinstance(lowercase__ , (dict, UserDict) ):
return {k: to_py_obj(lowercase__ ) for k, v in obj.items()}
elif isinstance(lowercase__ , (list, tuple) ):
return [to_py_obj(lowercase__ ) for o in obj]
elif is_tf_tensor(lowercase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowercase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowercase__ ):
return np.asarray(lowercase__ ).tolist()
elif isinstance(lowercase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
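# Hedged usage sketch (the recursive converter defined directly above; in the original library it is
# named `to_py_obj`, a name assumed here because every helper in this sample shares the obfuscated name):
#   to_py_obj({'scores': np.array([[1, 2], [3, 4]])})  # expected: {'scores': [[1, 2], [3, 4]]}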
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if isinstance(lowercase__ , (dict, UserDict) ):
return {k: to_numpy(lowercase__ ) for k, v in obj.items()}
elif isinstance(lowercase__ , (list, tuple) ):
return np.array(lowercase__ )
elif is_tf_tensor(lowercase__ ):
return obj.numpy()
elif is_torch_tensor(lowercase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowercase__ ):
return np.asarray(lowercase__ )
else:
return obj
class A ( A_ ):
def _A (self ):
__lowercase= fields(self )
# Safety and consistency checks
if not len(lowerCAmelCase ):
raise ValueError(f'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
__lowercase= getattr(self , class_fields[0].name )
__lowercase= all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowerCAmelCase ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= first_field.items()
__lowercase= True
else:
try:
__lowercase= iter(lowerCAmelCase )
__lowercase= True
except TypeError:
__lowercase= False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCAmelCase ):
if (
not isinstance(lowerCAmelCase , (list, tuple) )
or not len(lowerCAmelCase ) == 2
or not isinstance(element[0] , lowerCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__lowercase= first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__lowercase= element[1]
elif first_field is not None:
__lowercase= first_field
else:
for field in class_fields:
__lowercase= getattr(self , field.name )
if v is not None:
__lowercase= v
def __delitem__(self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__(self , lowerCAmelCase ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self , lowerCAmelCase , lowerCAmelCase ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCAmelCase , lowerCAmelCase )
super().__setattr__(lowerCAmelCase , lowerCAmelCase )
def __setitem__(self , lowerCAmelCase , lowerCAmelCase ):
# Will raise a KeyException if needed
super().__setitem__(lowerCAmelCase , lowerCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCAmelCase , lowerCAmelCase )
def _A (self ):
return tuple(self[k] for k in self.keys() )
class A ( A_ , A_ ):
@classmethod
def _A (cls , lowerCAmelCase ):
raise ValueError(
f'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class A ( A_ ):
UpperCamelCase_ : str ='''longest'''
UpperCamelCase_ : Optional[Any] ='''max_length'''
UpperCamelCase_ : Optional[Any] ='''do_not_pad'''
class A ( A_ ):
UpperCamelCase_ : List[str] ='''pt'''
UpperCamelCase_ : int ='''tf'''
UpperCamelCase_ : List[str] ='''np'''
UpperCamelCase_ : Optional[Any] ='''jax'''
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= context_managers
__lowercase= ExitStack()
def __enter__(self ):
for context_manager in self.context_managers:
self.stack.enter_context(lowerCAmelCase )
def __exit__(self , *lowerCAmelCase , **lowerCAmelCase ):
self.stack.__exit__(*lowerCAmelCase , **lowerCAmelCase )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= infer_framework(lowercase__ )
if framework == "tf":
__lowercase= inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__lowercase= inspect.signature(model_class.forward ) # PyTorch models
else:
__lowercase= inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
__lowercase= model_class.__name__
__lowercase= infer_framework(lowercase__ )
if framework == "tf":
__lowercase= inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__lowercase= inspect.signature(model_class.forward ) # PyTorch models
else:
__lowercase= inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _lowerCamelCase( lowercase__ , lowercase__ = "" , lowercase__ = "." ) -> str:
'''simple docstring'''
def _flatten_dict(lowercase__ , lowercase__="" , lowercase__="." ):
for k, v in d.items():
__lowercase= str(lowercase__ ) + delimiter + str(lowercase__ ) if parent_key else k
if v and isinstance(lowercase__ , lowercase__ ):
yield from flatten_dict(lowercase__ , lowercase__ , delimiter=lowercase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowercase__ , lowercase__ , lowercase__ ) )
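# Hedged usage sketch (intended behaviour of the dict flattener above; `flatten_dict` is the original
# library name, assumed here, and the nested-key construction in this obfuscated sample may differ):
#   flatten_dict({'a': 1, 'b': {'c': 2}})  # expected: {'a': 1, 'b.c': 2}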
@contextmanager
def _lowerCamelCase( lowercase__ , lowercase__ = False ) -> int:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> Any:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.transpose(lowercase__ , axes=lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.T if axes is None else array.permute(*lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.transpose(lowercase__ , perm=lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.transpose(lowercase__ , axes=lowercase__ )
else:
raise ValueError(F'Type not supported for transpose: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.reshape(lowercase__ , lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.reshape(*lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.reshape(lowercase__ , lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.reshape(lowercase__ , lowercase__ )
else:
raise ValueError(F'Type not supported for reshape: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.squeeze(lowercase__ , axis=lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.squeeze(lowercase__ , axis=lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.squeeze(lowercase__ , axis=lowercase__ )
else:
raise ValueError(F'Type not supported for squeeze: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.expand_dims(lowercase__ , lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.unsqueeze(dim=lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.expand_dims(lowercase__ , axis=lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.expand_dims(lowercase__ , axis=lowercase__ )
else:
raise ValueError(F'Type not supported for expand_dims: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.size(lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.numel()
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.size(lowercase__ )
elif is_jax_tensor(lowercase__ ):
return array.size
else:
raise ValueError(F'Type not supported for expand_dims: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(lowercase__ , (tuple, list) ):
__lowercase= [F'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
__lowercase= F'{repo_id}--{value}'
return auto_map
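# Hedged usage sketch (prefixing auto_map entries with a repo id, as done above; the argument order and
# the example names are assumptions based on the original library, not taken from this sample):
#   _lowerCamelCase({'AutoConfig': 'configuration.MyConfig'}, 'user/repo')
#   # expected: {'AutoConfig': 'user/repo--configuration.MyConfig'}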
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
for base_class in inspect.getmro(lowercase__ ):
__lowercase= base_class.__module__
__lowercase= base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'Could not infer framework from class {model_class}.' )
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
| 304
| 0
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A ( A_ , unittest.TestCase ):
UpperCamelCase_ : str =ConsistencyModelPipeline
UpperCamelCase_ : List[Any] =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase_ : Optional[Any] =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
UpperCamelCase_ : Union[str, Any] =frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def _A (self ):
__lowercase= UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _A (self ):
__lowercase= UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _A (self , lowerCAmelCase=False ):
if class_cond:
__lowercase= self.dummy_cond_unet
else:
__lowercase= self.dummy_uncond_unet
# Default to CM multistep sampler
__lowercase= CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowercase= {
'unet': unet,
'scheduler': scheduler,
}
return components
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith('mps' ):
__lowercase= torch.manual_seed(lowerCAmelCase )
else:
__lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
__lowercase= {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [2_2, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= ConsistencyModelPipeline(**lowerCAmelCase )
__lowercase= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components(class_cond=lowerCAmelCase )
__lowercase= ConsistencyModelPipeline(**lowerCAmelCase )
__lowercase= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= 0
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= ConsistencyModelPipeline(**lowerCAmelCase )
__lowercase= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= 1
__lowercase= None
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components(class_cond=lowerCAmelCase )
__lowercase= ConsistencyModelPipeline(**lowerCAmelCase )
__lowercase= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= 1
__lowercase= None
__lowercase= 0
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self , lowerCAmelCase=0 , lowerCAmelCase=False , lowerCAmelCase="cpu" , lowerCAmelCase=torch.floataa , lowerCAmelCase=(1, 3, 6_4, 6_4) ):
__lowercase= torch.manual_seed(lowerCAmelCase )
__lowercase= {
'num_inference_steps': None,
'timesteps': [2_2, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowercase= self.get_fixed_latents(seed=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase , shape=lowerCAmelCase )
__lowercase= latents
return inputs
def _A (self , lowerCAmelCase=0 , lowerCAmelCase="cpu" , lowerCAmelCase=torch.floataa , lowerCAmelCase=(1, 3, 6_4, 6_4) ):
if type(lowerCAmelCase ) == str:
__lowercase= torch.device(lowerCAmelCase )
__lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
__lowercase= randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
return latents
def _A (self ):
__lowercase= UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowercase= CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowercase= ConsistencyModelPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pipe.to(torch_device=lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_inputs()
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _A (self ):
__lowercase= UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowercase= CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowercase= ConsistencyModelPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pipe.to(torch_device=lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_inputs()
__lowercase= 1
__lowercase= None
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _A (self ):
__lowercase= UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowercase= CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowercase= ConsistencyModelPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pipe.to(torch_device=lowerCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_inputs(get_fixed_latents=lowerCAmelCase , device=lowerCAmelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase , enable_math=lowerCAmelCase , enable_mem_efficient=lowerCAmelCase ):
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _A (self ):
__lowercase= UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowercase= CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowercase= ConsistencyModelPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pipe.to(torch_device=lowerCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_inputs(get_fixed_latents=lowerCAmelCase , device=lowerCAmelCase )
__lowercase= 1
__lowercase= None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase , enable_math=lowerCAmelCase , enable_mem_efficient=lowerCAmelCase ):
__lowercase= pipe(**lowerCAmelCase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= image[0, -3:, -3:, -1]
__lowercase= np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 0
|
def _lowerCamelCase( lowercase__ ) -> list:
'''simple docstring'''
if len(lowercase__ ) <= 1:
return lst
__lowercase= 1
while i < len(lowercase__ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
__lowercase, __lowercase= lst[i], lst[i - 1]
i -= 1
if i == 0:
__lowercase= 1
return lst
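# Hedged usage sketch (gnome sort as implemented above; it sorts the list in place and returns it):
#   _lowerCamelCase([34, 2, 10, -9])  # expected: [-9, 2, 10, 34]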
if __name__ == "__main__":
lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class lowerCamelCase (nn.Module ):
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = (1_6, 3_2, 9_6, 2_5_6)
lowerCamelCase__ = jnp.floataa
def __A ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i]
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE_ = nn.Conv(
__magic_name__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = nn.Conv(
__magic_name__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = blocks
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[str] , __magic_name__ : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.conv_in(__magic_name__ )
SCREAMING_SNAKE_CASE_ = nn.silu(__magic_name__ )
for block in self.blocks:
SCREAMING_SNAKE_CASE_ = block(__magic_name__ )
SCREAMING_SNAKE_CASE_ = nn.silu(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.conv_out(__magic_name__ )
return embedding
@flax_register_to_config
class lowerCamelCase (nn.Module , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = 3_2
lowerCamelCase__ = 4
lowerCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase__ = False
lowerCamelCase__ = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1_2_8_0
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = "rgb"
lowerCamelCase__ = (1_6, 3_2, 9_6, 2_5_6)
def __A ( self : Optional[int] , __magic_name__ : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE_ = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE_ = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE_ = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = jax.random.split(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {"params": params_rng, "dropout": dropout_rng}
return self.init(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )["params"]
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = self.block_out_channels
SCREAMING_SNAKE_CASE_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE_ = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE_ = FlaxTimestepEmbedding(__magic_name__ , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE_ = self.only_cross_attention
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = block_out_channels[0]
SCREAMING_SNAKE_CASE_ = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE_ = output_channel
SCREAMING_SNAKE_CASE_ = block_out_channels[i]
SCREAMING_SNAKE_CASE_ = i == len(__magic_name__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE_ = FlaxCrossAttnDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE_ = FlaxDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__magic_name__ )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE_ = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
if not is_final_block:
SCREAMING_SNAKE_CASE_ = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = down_blocks
SCREAMING_SNAKE_CASE_ = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE_ = block_out_channels[-1]
SCREAMING_SNAKE_CASE_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=__magic_name__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Dict , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : float = 1.0 , __magic_name__ : bool = True , __magic_name__ : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
SCREAMING_SNAKE_CASE_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE_ = jnp.flip(__magic_name__ , axis=1 )
# 1. time
if not isinstance(__magic_name__ , jnp.ndarray ):
SCREAMING_SNAKE_CASE_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__magic_name__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(__magic_name__ , 0 )
SCREAMING_SNAKE_CASE_ = self.time_proj(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.time_embedding(__magic_name__ )
# 2. pre-process
SCREAMING_SNAKE_CASE_ = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.conv_in(__magic_name__ )
SCREAMING_SNAKE_CASE_ = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.controlnet_cond_embedding(__magic_name__ )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE_ = (sample,)
for down_block in self.down_blocks:
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(__magic_name__ , __magic_name__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE_ = self.mid_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
        # 5. controlnet blocks
SCREAMING_SNAKE_CASE_ = ()
for down_block_res_sample, controlnet_block in zip(__magic_name__ , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE_ = controlnet_block(__magic_name__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE_ = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE_ = self.controlnet_mid_block(__magic_name__ )
# 6. scaling
SCREAMING_SNAKE_CASE_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__magic_name__ , mid_block_res_sample=__magic_name__ )
| 305
|
from __future__ import annotations
import numpy as np
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.shape(__UpperCamelCase )
if rows != columns:
SCREAMING_SNAKE_CASE_ = (
"'table' has to be of square shaped array but got a "
F'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) )
SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
SCREAMING_SNAKE_CASE_ = (table[i][j] - total) / upper[j][j]
SCREAMING_SNAKE_CASE_ = 1
for j in range(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE_ = table[i][j] - total
return lower, upper
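# Self-contained worked example (illustration only; names are local to this sketch)
# of the Doolittle factorisation computed above: for A = [[4, 3], [6, 3]] the
# factors are L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]], and L @ U
# reproduces A.
_a_demo = np.array([[4.0, 3.0], [6.0, 3.0]])
_l_demo = np.array([[1.0, 0.0], [1.5, 1.0]])
_u_demo = np.array([[4.0, 3.0], [0.0, -1.5]])
assert np.allclose(_l_demo @ _u_demo, _a_demo)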
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|
from ...configuration_utils import PretrainedConfig
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''bert-generation'''
def __init__( self : Union[str, Any] , __magic_name__ : List[str]=50_358 , __magic_name__ : Optional[Any]=1_024 , __magic_name__ : Optional[Any]=24 , __magic_name__ : str=16 , __magic_name__ : str=4_096 , __magic_name__ : List[str]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : int=0.02 , __magic_name__ : Optional[Any]=1e-12 , __magic_name__ : int=0 , __magic_name__ : Tuple=2 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[Any]="absolute" , __magic_name__ : Tuple=True , **__magic_name__ : Dict , ) -> int:
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
| 305
|
from math import pi, sqrt, tan
def a__ ( __UpperCamelCase ):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
SCREAMING_SNAKE_CASE_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(__UpperCamelCase , 2 ) * torus_radius * tube_radius
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def a__ ( __UpperCamelCase ):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
SCREAMING_SNAKE_CASE_ = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE_ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
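# Worked check of Heron's formula above (self-contained, illustration only): a
# 5-12-13 right triangle has semi-perimeter 15, so its area is
# sqrt(15 * 10 * 3 * 2) = 30.
assert sqrt(15 * 10 * 3 * 2) == 30.0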
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
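# Quick self-contained check of the regular-polygon formula above (illustration
# only): a regular 4-gon with side 10 is a square, so the value should be 100.
assert abs((4 * 10**2) / (4 * tan(pi / 4)) - 100.0) < 1e-9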
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 305
| 1
|
def a__ ( __UpperCamelCase = 1_0 , __UpperCamelCase = 1_0_0_0 , __UpperCamelCase = True ):
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return int((number_a + number_a) / 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
assert (
isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(__UpperCamelCase ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
SCREAMING_SNAKE_CASE_ = lower
SCREAMING_SNAKE_CASE_ = higher
SCREAMING_SNAKE_CASE_ = []
while True:
SCREAMING_SNAKE_CASE_ = get_avg(__UpperCamelCase , __UpperCamelCase )
last_numbers.append(__UpperCamelCase )
if answer(__UpperCamelCase ) == "low":
SCREAMING_SNAKE_CASE_ = number
elif answer(__UpperCamelCase ) == "high":
SCREAMING_SNAKE_CASE_ = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
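# Self-contained sketch (illustration only; names are local to this sketch) of the
# midpoint-guessing idea above, without console I/O: narrow [lower, higher] around
# a hidden target until the midpoint hits it.
def _sketch_bisect_guess(lower: int, higher: int, target: int) -> int:
    guesses = 0
    while True:
        guesses += 1
        mid = (lower + higher) // 2
        if mid == target:
            return guesses
        if mid < target:
            lower = mid + 1
        else:
            higher = mid - 1
assert _sketch_bisect_guess(0, 1_000, 42) <= 10  # at most ~log2(1001) probes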
def a__ ( ):
SCREAMING_SNAKE_CASE_ = int(input("Enter lower value : " ).strip() )
SCREAMING_SNAKE_CASE_ = int(input("Enter high value : " ).strip() )
SCREAMING_SNAKE_CASE_ = int(input("Enter value to guess : " ).strip() )
guess_the_number(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 305
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : int = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''blenderbot-small'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Dict , __magic_name__ : Dict=50_265 , __magic_name__ : str=512 , __magic_name__ : List[Any]=8 , __magic_name__ : Any=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Any=True , __magic_name__ : Dict="gelu" , __magic_name__ : Tuple=512 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[Any]=False , __magic_name__ : str=0 , __magic_name__ : Dict=1 , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : Optional[Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ = {0: "batch"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super().outputs
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers
SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ = seqlen + 2
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ )
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
| 305
| 1
|
import itertools
import math
def a__ ( __UpperCamelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
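# Quick self-contained check of the 6k +/- 1 observation used above: every prime
# greater than 3 leaves a remainder of 1 or 5 when divided by 6.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29, 31))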
def a__ ( ):
SCREAMING_SNAKE_CASE_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def a__ ( __UpperCamelCase = 1_0_0_0_1 ):
return next(itertools.islice(prime_generator() , nth - 1 , __UpperCamelCase ) )
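# Self-contained sketch (illustration only; names are local to this sketch) of the
# itertools.islice pattern used above: pick the n-th element of an infinite
# generator without materialising it.
def _sketch_nth_odd(nth: int) -> int:
    odds = itertools.count(1, 2)
    return next(itertools.islice(odds, nth - 1, None))
assert _sketch_nth_odd(5) == 9  # the odd numbers start 1, 3, 5, 7, 9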
if __name__ == "__main__":
print(f"{solution() = }")
| 305
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 100
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = out_indices
SCREAMING_SNAKE_CASE_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 1
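    # Worked example of the patch arithmetic above (added for illustration): with
    # the defaults image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225
    # patches, so the expected sequence length is 225 + 1 = 226 including [CLS].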
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Dict ) -> Optional[int]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = BeitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __A ( self : List[str] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : str ) -> List[str]:
pass
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def __A ( self : int ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __A ( self : int ) -> Optional[int]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> str:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 281
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__magic_name__ , )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
| 305
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : str=13 , __magic_name__ : str=7 , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=True , __magic_name__ : Any=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[Any]=99 , __magic_name__ : List[Any]=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : str=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=128 , __magic_name__ : Optional[int]=32 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : int=0.02 , __magic_name__ : Any=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
def __A ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] ) -> Dict:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def __A ( self : Optional[Any] ) -> List[str]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Dict ) -> str:
SCREAMING_SNAKE_CASE_ = NezhaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , token_type_ids=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = NezhaModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , encoder_hidden_states=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = NezhaForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = NezhaForNextSentencePrediction(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self : int , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : int ) -> str:
SCREAMING_SNAKE_CASE_ = NezhaForPreTraining(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , next_sentence_label=__magic_name__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self : List[str] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = NezhaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : Dict , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = NezhaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = NezhaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_choices
SCREAMING_SNAKE_CASE_ = NezhaForMultipleChoice(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Dict ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
def __A ( self : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[Any]=False ) -> str:
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def __A ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE_ = NezhaModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__magic_name__ )
def __A ( self : Any ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def __A ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
def __A ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__magic_name__ )
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __A ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
@slow
def __A ( self : Any ) -> Tuple:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = NezhaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@slow
@require_torch_gpu
def __A ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ )
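            # trace the model with TorchScript on CPU inputs, round-trip it through save/load,
            # then run the reloaded module on the target device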
SCREAMING_SNAKE_CASE_ = torch.jit.trace(
__magic_name__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__magic_name__ , os.path.join(__magic_name__ , "bert.pt" ) )
SCREAMING_SNAKE_CASE_ = torch.jit.load(os.path.join(__magic_name__ , "bert.pt" ) , map_location=__magic_name__ )
loaded(inputs_dict["input_ids"].to(__magic_name__ ) , inputs_dict["attention_mask"].to(__magic_name__ ) )
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ )[0]
SCREAMING_SNAKE_CASE_ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ )[0]
SCREAMING_SNAKE_CASE_ = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
| 305
|
from __future__ import annotations
END = "#"
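# END is a sentinel key: its presence in a trie node marks the end of a stored word.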
class Trie:
    """Trie that supports prefix-based word autocompletion."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 305
| 1
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 100
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = out_indices
SCREAMING_SNAKE_CASE_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 1
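        # with the defaults used here (image_size=30, patch_size=2) this is (30 // 2) ** 2 = 225 patches, so seq_length = 226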
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Dict ) -> Optional[int]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __A ( self : str ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = BeitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __A ( self : List[str] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : str ) -> List[str]:
pass
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def __A ( self : int ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __A ( self : int ) -> Optional[int]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> str:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ )
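        # the patch16 checkpoint splits a 224x224 image into (224 // 16) ** 2 = 196 patches, one mask flag per patch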
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 281
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__magic_name__ , )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
| 305
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time

class MLFQ:
    """Multilevel feedback queue scheduler: round robin on the upper queues, FCFS on the last one."""

    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
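    # e.g. a process that last stopped at t=10 and regains the CPU at t=17 accumulates 7 more units of waiting time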
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
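# Each of the first (number_of_queues - 1) levels gets one round-robin pass with its own time slice;
# whatever is still unfinished after those passes is drained by the final first-come-first-served queue.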
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
| 305
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = params
SCREAMING_SNAKE_CASE_ = np.array(__magic_name__ )
SCREAMING_SNAKE_CASE_ = np.array([len(__magic_name__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , __magic_name__ : Tuple ) -> Tuple:
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any] ) -> Any:
return len(self.lengths )
def __A ( self : Tuple ) -> List[str]:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __A ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.params.max_model_input_size
SCREAMING_SNAKE_CASE_ = self.lengths > max_len
logger.info(F'''Splitting {sum(__magic_name__ )} too long sequences.''' )
def divide_chunks(__magic_name__ : str , __magic_name__ : List[str] ):
return [l[i : i + n] for i in range(0 , len(__magic_name__ ) , __magic_name__ )]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
if self.params.mlm:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
SCREAMING_SNAKE_CASE_ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
SCREAMING_SNAKE_CASE_ = np.insert(__magic_name__ , 0 , __magic_name__ )
if sub_s[-1] != sep_id:
SCREAMING_SNAKE_CASE_ = np.insert(__magic_name__ , len(__magic_name__ ) , __magic_name__ )
assert len(__magic_name__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__magic_name__ )
new_tok_ids.extend(__magic_name__ )
new_lengths.extend([len(__magic_name__ ) for l in sub_seqs] )
SCREAMING_SNAKE_CASE_ = np.array(__magic_name__ )
SCREAMING_SNAKE_CASE_ = np.array(__magic_name__ )
def __A ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE_ = len(self )
SCREAMING_SNAKE_CASE_ = self.lengths > 11
SCREAMING_SNAKE_CASE_ = self.token_ids[indices]
SCREAMING_SNAKE_CASE_ = self.lengths[indices]
SCREAMING_SNAKE_CASE_ = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def __A ( self : Optional[int] ) -> Tuple:
if "unk_token" not in self.params.special_tok_ids:
return
else:
SCREAMING_SNAKE_CASE_ = self.params.special_tok_ids["unk_token"]
SCREAMING_SNAKE_CASE_ = len(self )
SCREAMING_SNAKE_CASE_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
SCREAMING_SNAKE_CASE_ = (unk_occs / self.lengths) < 0.5
SCREAMING_SNAKE_CASE_ = self.token_ids[indices]
SCREAMING_SNAKE_CASE_ = self.lengths[indices]
SCREAMING_SNAKE_CASE_ = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def __A ( self : Any ) -> Any:
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __A ( self : Tuple , __magic_name__ : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = [t[0] for t in batch]
SCREAMING_SNAKE_CASE_ = [t[1] for t in batch]
assert len(__magic_name__ ) == len(__magic_name__ )
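        # Pad every sequence in the batch to the longest one so they can be stacked into one tensor;
        # the (token_ids, lengths) pair returned below is shaped like a DataLoader collate_fn output.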
# Max for paddings
SCREAMING_SNAKE_CASE_ = max(__magic_name__ )
# Pad token ids
if self.params.mlm:
SCREAMING_SNAKE_CASE_ = self.params.special_tok_ids["pad_token"]
else:
SCREAMING_SNAKE_CASE_ = self.params.special_tok_ids["unk_token"]
SCREAMING_SNAKE_CASE_ = [list(t.astype(__magic_name__ ) ) + [pad_idx] * (max_seq_len_ - len(__magic_name__ )) for t in token_ids]
assert len(tk_ ) == len(__magic_name__ )
assert all(len(__magic_name__ ) == max_seq_len_ for t in tk_ )
SCREAMING_SNAKE_CASE_ = torch.tensor(tk_ ) # (bs, max_seq_len_)
SCREAMING_SNAKE_CASE_ = torch.tensor(__magic_name__ ) # (bs)
return tk_t, lg_t
| 305
|
import torch


def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 305
| 1
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = inspect.getfile(accelerate.test_utils )
lowerCamelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase__ = ['''accelerate''', '''launch''']
lowerCamelCase__ = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase__ = '''default_config.yaml'''
lowerCamelCase__ = config_folder / config_file
lowerCamelCase__ = config_folder / '''_default_config.yaml'''
lowerCamelCase__ = Path('''tests/test_configs''' )
@classmethod
def __A ( cls : Dict ) -> Any:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __A ( cls : Dict ) -> Dict:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.base_cmd
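        # request multi-GPU launching only when more than one CUDA device is actually visible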
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def __A ( self : int ) -> List[Any]:
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def __A ( self : List[str] ) -> str:
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = '''test-tpu'''
lowerCamelCase__ = '''us-central1-a'''
lowerCamelCase__ = '''ls'''
lowerCamelCase__ = ['''accelerate''', '''tpu-config''']
lowerCamelCase__ = '''cd /usr/share'''
lowerCamelCase__ = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase__ = '''Running gcloud compute tpus tpu-vm ssh'''
def __A ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def __A ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def __A ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def __A ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def __A ( self : int ) -> int:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 305
|
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
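    # (append a single "1" bit, then "0" bits until the length is 448 mod 512, and finally the original
    # message length as a 64-bit little-endian value, as specified by RFC 1321)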
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
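    # 32-bit left rotation: bits shifted past bit 31 wrap around to bit 0,
    # e.g. rotating 0x80000000 left by 1 yields 0x00000001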
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
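    # added_consts[i] = floor(2**32 * |sin(i + 1)|): the 64 per-round constants from the MD5 specification (RFC 1321)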
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
SCREAMING_SNAKE_CASE_ = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|