| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source : float ,target : float ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
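# Hedged example (added): with the signature repaired above, the helper
# checks strict 1% relative closeness, so is_apercent_close(995, 1000) is
# True (0.5% apart) while is_apercent_close(990, 1000) is False, since
# 10 / 1000 == 0.01 is not strictly below the threshold.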
@pytest.mark.integration
def UpperCamelCase ( dataset_dir ): # parameter and variable names reconstructed from their uses below
'''simple docstring'''
args = _TestCommandArgs(dataset=dataset_dir ,all_configs=True ,save_infos=True )
test_command = TestCommand(*args )
test_command.run()
dataset_readme_path = os.path.join(dataset_dir ,'README.md' )
assert os.path.exists(dataset_readme_path )
dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
expected_dataset_infos = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) ,splits=[
{
'name': 'train',
'num_bytes': 2_35_15_63,
'num_examples': 1_00_00,
},
{
'name': 'validation',
'num_bytes': 23_84_18,
'num_examples': 10_00,
},
] ,download_size=3_94_06_80 ,dataset_size=2_58_99_81 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result , expected = getattr(dataset_infos['default'] ,key ), getattr(expected_dataset_infos['default'] ,key )
if key == "num_bytes":
assert is_apercent_close(result ,expected )
elif key == "splits":
assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
assert result == expected
| 70 |
def UpperCamelCase ( a : int ,b : int ):
'''simple docstring'''
res = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( a : int ,b : int ,c : int ):
'''simple docstring'''
res = 0
while b > 0:
if b & 1:
res = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
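# The two helpers above implement "Russian peasant" (double-and-add)
# multiplication: b is scanned bit by bit, a is doubled each round, and the
# current a is added into res whenever the low bit of b is set; the second
# variant keeps the accumulator reduced modulo c. Quick sanity check
# (assumed usage, values mine): 13 * 11 = 143 since 11 = 0b1011 picks up
# a, 2a and 8a, and the modular variant gives (13 * 11) % 7 = 3.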
| 70 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs ( word ):
'''simple docstring'''
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
pairs = set(pairs )
return pairs
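# Assumed example (mine, not from the source): get_pairs(("h", "e", "l", "l", "o"))
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, i.e. the set of
# adjacent symbol pairs that the BPE merge loop in the tokenizer below ranks
# against self.bpe_ranks and merges greedily.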
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ):
"""simple docstring"""
super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
with open(vocab_file , encoding='utf-8' ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file , encoding='utf-8' ) as merges_handle:
merges = merges_handle.read().split('\n' )[1:-1]
merges = [tuple(merge.split() ) for merge in merges]
self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
self.cache = {}
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return len(self.encoder )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A_ : List[str] = re.sub('([.,!?()])' , r' \1' , lowercase )
A_ : Tuple = re.sub('(\')' , r' \1 ' , lowercase )
A_ : Optional[int] = re.sub(r'\s{2,}' , ' ' , lowercase )
if "\n" in token:
A_ : Dict = token.replace('\n' , ' __newln__' )
A_ : Optional[Any] = token.split(' ' )
A_ : Union[str, Any] = []
for token in tokens:
if not len(lowercase ):
continue
A_ : Union[str, Any] = token.lower()
A_ : Optional[int] = tuple(lowercase )
A_ : List[str] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
A_ : Optional[int] = get_pairs(lowercase )
if not pairs:
words.append(lowercase )
continue
while True:
A_ : str = min(lowercase , key=lambda lowercase : self.bpe_ranks.get(lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Tuple = bigram
A_ : List[Any] = []
A_ : int = 0
while i < len(lowercase ):
try:
A_ : List[Any] = word.index(lowercase , lowercase )
new_word.extend(word[i:j] )
A_ : Any = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : int = tuple(lowercase )
A_ : Any = new_word
if len(lowercase ) == 1:
break
else:
A_ : Dict = get_pairs(lowercase )
A_ : List[str] = '@@ '.join(lowercase )
A_ : List[Any] = word[:-4]
A_ : Optional[int] = word
words.append(lowercase )
return " ".join(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = []
A_ : Optional[int] = re.findall(r'\S+\n?' , lowercase )
for token in words:
split_tokens.extend(list(self.bpe(lowercase ).split(' ' ) ) )
return split_tokens
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = token.lower()
return self.encoder.get(lowercase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.decoder.get(lowercase , self.unk_token )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = ' '.join(lowercase ).replace('@@ ' , '' ).strip()
return out_string
def save_vocabulary ( self , save_directory , filename_prefix = None ):
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
merge_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(vocab_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
index = 0
with open(merge_file , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
index = token_index
writer.write(' '.join(bpe_tokens ) + '\n' )
index += 1
return vocab_file, merge_file
| 70 |
def hexagonal_numbers ( length : int ):
'''simple docstring'''
if length <= 0 or not isinstance(length ,int ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
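# Expected output (computed from the formula above, h_n = n * (2n - 1) with
# n starting at 0): [0, 1, 6, 15, 28] and
# [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]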
| 70 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
'''simple docstring'''
def __init__( self , list_of_points ):
"""simple docstring"""
self.list_of_points = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
self.degree = len(list_of_points ) - 1
def basis_function ( self , t ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
output_values : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(output_values ) , 5 ) == 1
return output_values
def bezier_curve_function ( self , t ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
basis_function = self.basis_function(t )
x = 0.0
y = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def plot_curve ( self , step_size = 0.01 ):
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
to_plot_x : list[float] = [] # x coordinates of points to plot
to_plot_y : list[float] = [] # y coordinates of points to plot
t = 0.0
while t <= 1:
value = self.bezier_curve_function(t )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
x = [i[0] for i in self.list_of_points]
y = [i[1] for i in self.list_of_points]
plt.plot(
to_plot_x , to_plot_y , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
plt.scatter(x , y , color='red' , label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
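# A minimal non-plotting check (assumed usage of the class as reconstructed
# above): at t = 0.5 a degree-1 curve is the midpoint of its two control
# points, so BezierCurve([(1, 1), (3, 3)]).bezier_curve_function(0.5)
# should return (2.0, 2.0).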
| 70 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
num_nodes , num_edges = 9, 14 # noqa: F841
edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
adjancency = defaultdict(list )
for nodea, nodeb, cost in edges:
adjancency[nodea].append([nodeb, cost] )
adjancency[nodeb].append([nodea, cost] )
result = mst(adjancency )
expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
edge = tuple(answer[:2] )
reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
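# Note (added): each expected MST edge is checked in both orientations
# because the adjacency list above is undirected, so prisms_algorithm may
# legitimately report an edge as either (u, v) or (v, u).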
| 70 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
| 70 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main ( ):
'''simple docstring'''
parser = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=False )
subparsers = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=subparsers )
env_command_parser(subparsers=subparsers )
launch_command_parser(subparsers=subparsers )
tpu_command_parser(subparsers=subparsers )
test_command_parser(subparsers=subparsers )
# Let's go
args = parser.parse_args()
if not hasattr(args ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
| 70 | 1 |
import requests
_UpperCAmelCase = """""" # <-- Put your OpenWeatherMap appid here!
_UpperCAmelCase = """https://api.openweathermap.org/data/2.5/"""
def UpperCamelCase ( __lowercase : str = "Chicago" ,__lowercase : str = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + 'weather' ,params=locals() ).json()
def UpperCamelCase ( __lowercase : str = "Kolkata, India" ,__lowercase : str = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + 'forecast' ,params=locals() ).json()
def UpperCamelCase ( __lowercase : float = 55.68 ,__lowercase : float = 12.57 ,__lowercase : str = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + 'onecall' ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_UpperCAmelCase = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
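# Note on the pattern above (function names other than current_weather are
# reconstructed; the originals were obfuscated): passing locals() as `params`
# only works because the argument names (q, lat/lon, appid) match the query
# parameters the OpenWeatherMap endpoints expect. An explicit equivalent:
#   requests.get(URL_BASE + "weather", params={"q": location, "appid": APPID})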
| 70 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
text = tokenizer.encode('sequence builders' , add_special_tokens=False )
text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = """MobileNetV1Config"""
# Base docstring
_CHECKPOINT_FOR_DOC = """google/mobilenet_v1_1.0_224"""
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """google/mobilenet_v1_1.0_224"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCamelCase ( __lowercase : int ,__lowercase : Optional[int] ,__lowercase : str=None ):
'''simple docstring'''
A_ : Any = {}
if isinstance(__lowercase ,__lowercase ):
A_ : Optional[Any] = model.mobilenet_va
else:
A_ : str = model
A_ : List[Any] = 'MobilenetV1/Conv2d_0/'
A_ : Optional[Any] = backbone.conv_stem.convolution.weight
A_ : Optional[int] = backbone.conv_stem.normalization.bias
A_ : int = backbone.conv_stem.normalization.weight
A_ : int = backbone.conv_stem.normalization.running_mean
A_ : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A_ : Optional[Any] = i + 1
A_ : Union[str, Any] = i * 2
A_ : Optional[Any] = backbone.layer[pt_index]
A_ : Union[str, Any] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
A_ : int = pointer.convolution.weight
A_ : List[Any] = pointer.normalization.bias
A_ : Any = pointer.normalization.weight
A_ : Tuple = pointer.normalization.running_mean
A_ : Union[str, Any] = pointer.normalization.running_var
A_ : str = backbone.layer[pt_index + 1]
A_ : str = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
A_ : Dict = pointer.convolution.weight
A_ : List[str] = pointer.normalization.bias
A_ : Optional[int] = pointer.normalization.weight
A_ : Dict = pointer.normalization.running_mean
A_ : str = pointer.normalization.running_var
if isinstance(__lowercase ,__lowercase ):
A_ : List[str] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
A_ : Tuple = model.classifier.weight
A_ : Dict = model.classifier.bias
return tf_to_pt_map
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Tuple ,__lowercase : Dict ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
A_ : int = tf.train.list_variables(__lowercase )
A_ : Any = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
A_ : Optional[int] = tf.train.load_variable(__lowercase ,__lowercase )
A_ : int = array
# Build TF to PyTorch weights loading map
A_ : Optional[int] = _build_tf_to_pytorch_map(__lowercase ,__lowercase ,__lowercase )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
A_ : Tuple = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
A_ : Union[str, Any] = np.transpose(__lowercase ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
A_ : str = array.squeeze().transpose()
else:
A_ : Dict = np.transpose(__lowercase ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
A_ : Dict = torch.from_numpy(__lowercase )
tf_weights.pop(__lowercase ,__lowercase )
tf_weights.pop(name + '/RMSProp' ,__lowercase )
tf_weights.pop(name + '/RMSProp_1' ,__lowercase )
tf_weights.pop(name + '/ExponentialMovingAverage' ,__lowercase )
logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def UpperCamelCase ( __lowercase : torch.Tensor ,__lowercase : nn.Convad ):
'''simple docstring'''
A_ , A_ : str = features.shape[-2:]
A_ , A_ : str = conv_layer.stride
A_ , A_ : Tuple = conv_layer.kernel_size
if in_height % stride_height == 0:
A_ : Tuple = max(kernel_height - stride_height ,0 )
else:
A_ : Union[str, Any] = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
A_ : Any = max(kernel_width - stride_width ,0 )
else:
A_ : Union[str, Any] = max(kernel_width - (in_width % stride_width) ,0 )
A_ : List[Any] = pad_along_width // 2
A_ : List[str] = pad_along_width - pad_left
A_ : Optional[int] = pad_along_height // 2
A_ : Union[str, Any] = pad_along_height - pad_top
A_ : int = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowercase ,__lowercase ,'constant' ,0.0 )
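# What the helper above reproduces (added explanation): TensorFlow "SAME"
# padding. The total padding on each axis is kernel - stride when the input
# divides evenly by the stride, otherwise kernel - (input % stride), clamped
# at 0; it is then split so any odd leftover pixel lands on the right/bottom
# edge, matching TF's convention rather than PyTorch's symmetric padding.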
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase = 1 , lowercase = 1 , lowercase = False , lowercase = True , lowercase = True , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
A_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A_ : Union[str, Any] = nn.Convad(
in_channels=lowercase , out_channels=lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase , groups=lowercase , bias=lowercase , padding_mode='zeros' , )
if use_normalization:
A_ : Any = nn.BatchNormad(
num_features=lowercase , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowercase , track_running_stats=lowercase , )
else:
A_ : Union[str, Any] = None
if use_activation:
if isinstance(lowercase , lowercase ):
A_ : List[Any] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowercase ):
A_ : Optional[int] = ACTaFN[config.hidden_act]
else:
A_ : Optional[int] = config.hidden_act
else:
A_ : Union[str, Any] = None
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.config.tf_padding:
A_ : List[str] = apply_tf_padding(lowercase , self.convolution )
A_ : List[Any] = self.convolution(lowercase )
if self.normalization is not None:
A_ : List[Any] = self.normalization(lowercase )
if self.activation is not None:
A_ : List[Any] = self.activation(lowercase )
return features
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = MobileNetVaConfig
lowerCamelCase_ = load_tf_weights_in_mobilenet_va
lowerCamelCase_ = '''mobilenet_v1'''
lowerCamelCase_ = '''pixel_values'''
lowerCamelCase_ = False
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if isinstance(lowercase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_UpperCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = True ):
"""simple docstring"""
super().__init__(lowercase )
A_ : int = config
A_ : Union[str, Any] = 3_2
A_ : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
A_ : int = MobileNetVaConvLayer(
lowercase , in_channels=config.num_channels , out_channels=lowercase , kernel_size=3 , stride=2 , )
A_ : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A_ : Optional[int] = nn.ModuleList()
for i in range(1_3 ):
A_ : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A_ : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=3 , stride=strides[i] , groups=lowercase , ) )
self.layer.append(
MobileNetVaConvLayer(
lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=1 , ) )
A_ : Dict = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , ):
"""simple docstring"""
A_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
A_ : Optional[Any] = self.conv_stem(lowercase )
A_ : Tuple = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A_ : int = layer_module(lowercase )
if output_hidden_states:
A_ : str = all_hidden_states + (hidden_states,)
A_ : List[Any] = hidden_states
if self.pooler is not None:
A_ : Optional[Any] = torch.flatten(self.pooler(lowercase ) , start_dim=1 )
else:
A_ : Any = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=lowercase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
super().__init__(lowercase )
A_ : int = config.num_labels
A_ : Tuple = MobileNetVaModel(lowercase )
A_ : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase )
A_ : Optional[int] = nn.Linear(lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ):
"""simple docstring"""
A_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Any = self.mobilenet_va(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
A_ : Any = outputs.pooler_output if return_dict else outputs[1]
A_ : Any = self.classifier(self.dropout(lowercase ) )
A_ : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : Any = 'single_label_classification'
else:
A_ : Any = 'multi_label_classification'
if self.config.problem_type == "regression":
A_ : Tuple = MSELoss()
if self.num_labels == 1:
A_ : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A_ : List[str] = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
A_ : Any = CrossEntropyLoss()
A_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : Dict = BCEWithLogitsLoss()
A_ : List[str] = loss_fct(lowercase , lowercase )
if not return_dict:
A_ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states , )
| 70 |
import random
def rabin_miller ( num : int ):
'''simple docstring'''
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for _ in range(5 ):
a = random.randrange(2 ,num - 1 )
v = pow(a ,s ,num )
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v**2) % num
return True
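# How the check above works (Miller-Rabin with 5 random witnesses): write
# num - 1 = 2**t * s with s odd; num passes for a base a when a**s % num is
# 1, or when some square in the chain v, v**2, v**4, ... hits num - 1. Note
# the loop assumes an odd candidate; even inputs are filtered out by the
# small-prime check in the caller below.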
def is_prime_low_num ( num : int ):
'''simple docstring'''
if num < 2:
return False
low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(num )
def generate_large_prime ( keysize : int = 10_24 ):
'''simple docstring'''
while True:
num = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(num ):
return num
if __name__ == "__main__":
num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 1 |
def check_cycle ( graph : dict ):
'''simple docstring'''
visited : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
rec_stk : set[int] = set()
return any(
node not in visited and depth_first_search(graph ,node ,visited ,rec_stk )
for node in graph )
def depth_first_search ( graph : dict ,vertex : int ,visited : set ,rec_stk : set ):
'''simple docstring'''
visited.add(vertex )
rec_stk.add(vertex )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(graph ,node ,visited ,rec_stk ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(vertex )
return False
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
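# Assumed usage of the two functions above (depth_first_search is named by
# its in-body call sites; check_cycle is a reconstructed name):
#   check_cycle({0: [1], 1: [2], 2: [0]}) -> True   (0 -> 1 -> 2 -> 0 is a back edge)
#   check_cycle({0: [1], 1: [2], 2: []}) -> False   (a DAG has no cycle)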
| 70 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_m2m_100"""] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''Speech2TextFeatureExtractor'''
lowerCamelCase_ = '''Speech2TextTokenizer'''
def __init__( self , lowercase , lowercase ):
"""simple docstring"""
super().__init__(lowercase , lowercase )
A_ : List[str] = self.feature_extractor
A_ : Any = False
def __call__( self , *lowercase , **lowercase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
A_ : int = kwargs.pop('raw_speech' )
else:
A_ : int = kwargs.pop('audio' , lowercase )
A_ : Any = kwargs.pop('sampling_rate' , lowercase )
A_ : str = kwargs.pop('text' , lowercase )
if len(lowercase ) > 0:
A_ : int = args[0]
A_ : str = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
A_ : Optional[Any] = self.feature_extractor(lowercase , *lowercase , sampling_rate=lowercase , **lowercase )
if text is not None:
A_ : Tuple = self.tokenizer(lowercase , **lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A_ : int = encodings['input_ids']
return inputs
def lowerCAmelCase_ ( self , *lowercase , **lowercase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def lowerCAmelCase_ ( self , *lowercase , **lowercase ):
"""simple docstring"""
return self.tokenizer.decode(*lowercase , **lowercase )
@contextmanager
def lowerCAmelCase_ ( self ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
A_ : Tuple = True
A_ : Union[str, Any] = self.tokenizer
yield
A_ : List[Any] = self.feature_extractor
A_ : Dict = False
| 70 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 4
A_ : int = 3
A_ : List[str] = (3_2, 3_2)
A_ : Any = jax.random.PRNGKey(0 )
A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
A_ : int = self.dummy_input
return init_dict, inputs_dict
| 70 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""allegro/herbert-base-cased""": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = HerbertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="</s>" , **lowercase , ):
"""simple docstring"""
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , sep_token=lowercase , **lowercase , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
cls = [self.cls_token_id]
sep = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1]
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : List[Any] = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 70 | import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Any = np.array(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
message.replace(' ' , '' )
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
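# The class above implements a Bifid-style cipher: letters map to coordinates in a 5x5
# square, the row and column coordinates are written out as two rows, and the combined
# row is read back off in pairs. A minimal standalone sketch of the same idea (0-based
# coordinates; the function name here is illustrative, not from this file):
def _bifid_encode(message: str) -> str:
    flat = "abcdefghiklmnopqrstuvwxyz"  # 'j' folds into 'i', matching the square above
    msg = message.lower().replace(" ", "").replace("j", "i")
    rows = [flat.index(ch) // 5 for ch in msg]
    cols = [flat.index(ch) % 5 for ch in msg]
    mixed = rows + cols  # write both coordinate rows, then read them off row by row
    return "".join(flat[mixed[2 * i] * 5 + mixed[2 * i + 1]] for i in range(len(msg)))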
| 70 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ):
"""simple docstring"""
super().__init__()
A_ : Tuple = initial_learning_rate
A_ : List[str] = warmup_steps
A_ : int = power
A_ : Dict = decay_schedule_fn
A_ : Any = name
def __call__( self , lowercase ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
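            # For example, with initial_learning_rate=1e-3, warmup_steps=100 and
            # power=1.0, step 10 gives (10 / 100) ** 1.0 * 1e-3 = 1e-4.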
A_ : Optional[int] = tf.cast(lowercase , tf.floataa )
A_ : int = tf.cast(self.warmup_steps , tf.floataa )
A_ : Optional[int] = global_step_float / warmup_steps_float
A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,):
'''simple docstring'''
A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,)
if num_warmup_steps:
A_ : Tuple = WarmUp(
initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,)
if weight_decay_rate > 0.0:
A_ : Union[str, Any] = AdamWeightDecay(
            learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_1=__lowercase ,beta_2=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,)
else:
A_ : Dict = tf.keras.optimizers.Adam(
            learning_rate=__lowercase ,beta_1=__lowercase ,beta_2=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
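# A hypothetical usage sketch (`create_optimizer` is the original, de-obfuscated name of
# the helper above; the hyper-parameter values are illustrative, not from this file):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
#   )
#   model.compile(optimizer=optimizer)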
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ):
"""simple docstring"""
super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
A_ : Dict = weight_decay_rate
A_ : Union[str, Any] = include_in_weight_decay
A_ : str = exclude_from_weight_decay
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : Tuple = {'WarmUp': WarmUp}
return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase )
A_ : Optional[Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ):
"""simple docstring"""
A_ , A_ : Optional[int] = list(zip(*lowercase ) )
return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A_ : List[str] = apply_state or {}
A_ : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A_ : Dict = self._fallback_apply_state(lowercase , lowercase )
A_ : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return False
return True
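        # e.g. with the `exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]`
        # default wired in above, a variable named "encoder/layer_0/LayerNorm/gamma:0"
        # is skipped, while a dense kernel matches neither list and gets decayed.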
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : int = []
A_ : Optional[int] = None
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self._accum_steps is None:
A_ : int = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowercase ):
"""simple docstring"""
if not self._gradients:
A_ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowercase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' )
for accum_gradient, gradient in zip(self._gradients , lowercase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowercase )
self._accum_steps.assign_add(1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowercase ) )
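# Hypothetical usage sketch of the accumulator above (its original name is
# GradientAccumulator; `tape`, `loss`, `model` and `optimizer` are illustrative):
#   accumulator = GradientAccumulator()
#   for micro_batch in micro_batches:
#       grads = tape.gradient(loss, model.trainable_variables)
#       accumulator(grads)
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()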
| 70 | from math import sqrt
def UpperCamelCase ( __lowercase : int = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowercase ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
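# Sanity check from the Project Euler 86 statement: the least M whose cuboid count
# first exceeds 2,000 is 100, so solution(2_000) is expected to return 100.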
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
_UpperCAmelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
lowerCamelCase_ = field(default=__A , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCamelCase_ = field(default=__A , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCamelCase_ = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCamelCase_ = field(default=3_2 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
lowerCamelCase_ = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = {}
if self.train_dir is not None:
A_ : Any = self.train_dir
if self.validation_dir is not None:
A_ : int = self.validation_dir
A_ : str = data_files if data_files else None
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__A )} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
lowerCamelCase_ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCamelCase_ = field(default=__A , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase=1_9_2 , lowercase=3_2 , lowercase=4 , lowercase=0.6 ):
"""simple docstring"""
A_ : List[str] = input_size
A_ : Dict = mask_patch_size
A_ : Tuple = model_patch_size
A_ : Optional[Any] = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
A_ : int = self.input_size // self.mask_patch_size
A_ : Tuple = self.mask_patch_size // self.model_patch_size
A_ : Union[str, Any] = self.rand_size**2
A_ : Union[str, Any] = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
"""simple docstring"""
A_ : Optional[Any] = np.random.permutation(self.token_count )[: self.mask_count]
A_ : Tuple = np.zeros(self.token_count , dtype=lowercase )
A_ : Optional[Any] = 1
A_ : Any = mask.reshape((self.rand_size, self.rand_size) )
A_ : Any = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
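# Worked example of the mask arithmetic above with the defaults (input_size=192,
# mask_patch_size=32, model_patch_size=4, mask_ratio=0.6): rand_size = 192 // 32 = 6,
# scale = 32 // 4 = 8, token_count = 36, mask_count = ceil(36 * 0.6) = 22; the (6, 6)
# mask is upsampled to (48, 48), i.e. 2304 entries, before flattening.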
def UpperCamelCase ( __lowercase : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = torch.stack([example['pixel_values'] for example in examples] )
A_ : Any = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ , A_ , A_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ , A_ , A_ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' ,__lowercase ,__lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ : Tuple = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
A_ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
A_ : Tuple = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# If we don't have a validation split, split off a percentage of train as validation.
A_ : List[str] = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,__lowercase ) and data_args.train_val_split > 0.0:
A_ : Dict = ds['train'].train_test_split(data_args.train_val_split )
A_ : Optional[Any] = split['train']
A_ : List[str] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : int = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
A_ : List[str] = AutoConfig.from_pretrained(model_args.config_name_or_path ,**__lowercase )
elif model_args.model_name_or_path:
A_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path ,**__lowercase )
else:
A_ : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__lowercase ,'decoder_type' ):
A_ : List[str] = 'simmim'
# adapt config
A_ : Any = model_args.image_size if model_args.image_size is not None else config.image_size
A_ : Union[str, Any] = model_args.patch_size if model_args.patch_size is not None else config.patch_size
A_ : int = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
A_ : Optional[Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name ,**__lowercase )
elif model_args.model_name_or_path:
A_ : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path ,**__lowercase )
else:
A_ : List[str] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
A_ : Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
A_ : Dict = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info('Training new model from scratch' )
A_ : Tuple = AutoModelForMaskedImageModeling.from_config(__lowercase )
if training_args.do_train:
A_ : Dict = ds['train'].column_names
else:
A_ : List[str] = ds['validation'].column_names
if data_args.image_column_name is not None:
A_ : int = data_args.image_column_name
elif "image" in column_names:
A_ : Any = 'image'
elif "img" in column_names:
A_ : str = 'img'
else:
A_ : Optional[int] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
A_ : Optional[int] = Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size ,scale=(0.67, 1.0) ,ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ),
] )
# create mask generator
A_ : Union[str, Any] = MaskGenerator(
input_size=model_args.image_size ,mask_patch_size=data_args.mask_patch_size ,model_patch_size=model_args.patch_size ,mask_ratio=data_args.mask_ratio ,)
def preprocess_images(__lowercase : Dict ):
A_ : Union[str, Any] = [transforms(__lowercase ) for image in examples[image_column_name]]
A_ : Tuple = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
A_ : List[Any] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
A_ : Optional[int] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase )
# Initialize our trainer
A_ : List[str] = Trainer(
model=__lowercase ,args=__lowercase ,train_dataset=ds['train'] if training_args.do_train else None ,eval_dataset=ds['validation'] if training_args.do_eval else None ,tokenizer=__lowercase ,data_collator=__lowercase ,)
# Training
if training_args.do_train:
A_ : Tuple = None
if training_args.resume_from_checkpoint is not None:
A_ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ : Dict = last_checkpoint
A_ : Optional[Any] = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics('train' ,train_result.metrics )
trainer.save_metrics('train' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ : Dict = trainer.evaluate()
trainer.log_metrics('eval' ,__lowercase )
trainer.save_metrics('eval' ,__lowercase )
# Write model card and (optionally) push to hub
A_ : int = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main()
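# A hypothetical invocation of this SimMIM-style pretraining script (flag names come
# from the dataclasses above and transformers' TrainingArguments; the values are
# illustrative):
#   python run_mim.py --model_type vit --dataset_name cifar10 \
#       --output_dir ./simmim-out --do_train --do_eval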
| 70 |
| 70 | 1 |
def UpperCamelCase ( __lowercase : int ,__lowercase : Any ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : List[Any]=0 ):
'''simple docstring'''
return sorted(__lowercase ,key=lambda __lowercase : x[column] )
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : str ,__lowercase : Optional[Any]=float('inf' ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 ,__lowercase ):
A_ : int = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
A_ : int = current_dis
return min_dis
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : int ,__lowercase : Optional[int]=float('inf' ) ):
'''simple docstring'''
for i in range(min(6 ,points_counts - 1 ) ,__lowercase ):
for j in range(max(0 ,i - 6 ) ,__lowercase ):
A_ : Dict = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
A_ : Any = current_dis
return min_dis
def UpperCamelCase ( __lowercase : int ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(__lowercase ,__lowercase )
# recursion
A_ : Any = points_counts // 2
A_ : Dict = closest_pair_of_points_sqr(
__lowercase ,points_sorted_on_y[:mid] ,__lowercase )
A_ : str = closest_pair_of_points_sqr(
__lowercase ,points_sorted_on_y[mid:] ,points_counts - mid )
A_ : Optional[Any] = min(__lowercase ,__lowercase )
A_ : str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__lowercase )
A_ : Tuple = dis_between_closest_in_strip(
__lowercase ,len(__lowercase ) ,__lowercase )
return min(__lowercase ,__lowercase )
def UpperCamelCase ( __lowercase : Any ,__lowercase : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = column_based_sort(__lowercase ,column=0 )
A_ : List[str] = column_based_sort(__lowercase ,column=1 )
return (
closest_pair_of_points_sqr(
__lowercase ,__lowercase ,__lowercase )
) ** 0.5
if __name__ == "__main__":
_UpperCAmelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : Any = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
| 70 | 1 |
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
return "".join(chr(ord(__lowercase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 | def UpperCamelCase ( __lowercase : list ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
            A_ : Dict = dp[i - 1][j]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
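# Illustrative check of the minimum subset-sum difference routine above: for
# [1, 6, 11, 5] the best split is {1, 5, 6} vs {11} (sums 12 and 11), so the
# returned difference is 1.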
| 70 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : List[Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : str = get_activation('gelu' )
A_ : int = get_activation('gelu_10' )
A_ : Optional[int] = torch_builtin(lowercase )
A_ : Tuple = geluaa(lowercase )
A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowercase ):
get_activation('bogus' )
with self.assertRaises(lowercase ):
get_activation(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = get_activation('gelu' )
A_ : List[str] = 1
A_ : Optional[Any] = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowercase ):
A_ : str = acta.a
| 70 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = KandinskyVaaPriorPipeline
lowerCamelCase_ = ['''prompt''']
lowerCamelCase_ = ['''prompt''', '''negative_prompt''']
lowerCamelCase_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase_ = False
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 3_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 3_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_0_0
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : int = {
'num_attention_heads': 2,
'attention_head_dim': 1_2,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
A_ : List[str] = PriorTransformer(**lowercase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
A_ : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
A_ : str = CLIPVisionModelWithProjection(lowercase )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowercase , do_normalize=lowercase , do_resize=lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.dummy_prior
A_ : Optional[int] = self.dummy_image_encoder
A_ : str = self.dummy_text_encoder
A_ : Tuple = self.dummy_tokenizer
A_ : Union[str, Any] = self.dummy_image_processor
A_ : Optional[Any] = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase , clip_sample_range=10.0 , )
A_ : Dict = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : int = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = 'cpu'
A_ : str = self.get_dummy_components()
A_ : str = self.pipeline_class(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Tuple = pipe(**self.get_dummy_inputs(lowercase ) )
A_ : Dict = output.image_embeds
A_ : Any = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
A_ : List[str] = image[0, -1_0:]
A_ : Optional[Any] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
A_ : Union[str, Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = torch_device == 'cpu'
A_ : Tuple = True
A_ : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=lowercase , relax_max_difference=lowercase , test_mean_pixel_difference=lowercase , )
@skip_mps
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = torch_device == 'cpu'
A_ : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase , test_mean_pixel_difference=lowercase , )
| 70 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
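# An explicit ZeroPadding2D(kernel_size // 2) followed by a VALID convolution reproduces PyTorch's Conv2d padding semantics, which differ from TF 'SAME' padding whenever stride > 1.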
A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.convolution(self.padding(lowercase ) )
A_ : List[str] = self.normalization(lowercase )
A_ : List[Any] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[int] = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
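# Squeeze-and-Excitation block: global-average-pool the feature map, squeeze the channels through a ReLU 1x1 conv, expand them back through a sigmoid 1x1 conv, then rescale the input channel-wise.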
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.pooler(lowercase )
for layer_module in self.attention:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : Optional[int] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : int = layer_module(lowercase )
A_ : Union[str, Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Dict = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : int = max(1 , out_channels // config.groups_width )
A_ : Optional[int] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : List[str] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = hidden_state
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Dict = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[Any] = config
A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : str = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase )
A_ : Optional[int] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Dict = encoder_outputs[0]
A_ : List[Any] = self.pooler(lowercase )
# Change to NCHW output format to have uniformity in the modules
A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : int = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : List[Any] = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : List[Any] = self.classifier[0](lowercase )
A_ : Union[str, Any] = self.classifier[1](lowercase )
A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
| 70 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters ( state_dict ):
'''simple docstring'''
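# Despite its name, this returns a checksum of all parameter values (embeddings excluded); it is used below to verify that the converted model matches the original.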
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict ,codebook_state_dict ):
'''simple docstring'''
upgrade = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
key = key.replace('heads.cmd.mim_head.cls.predictions' ,'mmm_image_head' )
key = key.replace('heads.cmd.mlm_head.cls.predictions' ,'mmm_text_head' )
key = key.replace('heads.cmd.itm_head.cls' ,'itm_head' )
key = key.replace('heads.cmd.itm_head.pooler' ,'itm_head.pooler' )
key = key.replace('heads.cmd.clip_head.logit_scale' ,'flava.logit_scale' )
key = key.replace('heads.fairseq_mlm.cls.predictions' ,'mlm_head' )
key = key.replace('heads.imagenet.mim_head.cls.predictions' ,'mim_head' )
key = key.replace('mm_text_projection' ,'flava.text_to_mm_projection' )
key = key.replace('mm_image_projection' ,'flava.image_to_mm_projection' )
key = key.replace('image_encoder.module' ,'flava.image_model' )
key = key.replace('text_encoder.module' ,'flava.text_model' )
key = key.replace('mm_encoder.module.encoder.cls_token' ,'flava.multimodal_model.cls_token' )
key = key.replace('mm_encoder.module' ,'flava.multimodal_model' )
key = key.replace('text_projection' ,'flava.text_projection' )
key = key.replace('image_projection' ,'flava.image_projection' )
upgrade[key] = value.float()
# codebook weights are stored under the image_codebook.* namespace of FlavaForPreTraining
for key, value in codebook_state_dict.items():
upgrade[F'''image_codebook.{key}'''] = value
return upgrade
@torch.no_grad()
def UpperCamelCase ( __lowercase : int ,__lowercase : Tuple ,__lowercase : Tuple ,__lowercase : Tuple=None ):
'''simple docstring'''
if config_path is not None:
A_ : List[str] = FlavaConfig.from_pretrained(__lowercase )
else:
A_ : Dict = FlavaConfig()
A_ : Union[str, Any] = FlavaForPreTraining(__lowercase ).eval()
A_ : int = convert_dalle_checkpoint(__lowercase ,__lowercase ,save_checkpoint=__lowercase )
if os.path.exists(__lowercase ):
A_ : str = torch.load(__lowercase ,map_location='cpu' )
else:
A_ : Optional[int] = torch.hub.load_state_dict_from_url(__lowercase ,map_location='cpu' )
A_ : Any = upgrade_state_dict(__lowercase ,__lowercase )
hf_model.load_state_dict(__lowercase )
A_ : Optional[int] = hf_model.state_dict()
A_ : List[str] = count_parameters(__lowercase )
A_ : int = count_parameters(__lowercase ) + count_parameters(__lowercase )
assert torch.allclose(__lowercase ,__lowercase ,atol=1e-3 )
hf_model.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
_UpperCAmelCase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
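# Standard lazy-import scaffold: static type checkers follow the real imports under TYPE_CHECKING, while at runtime the module is replaced by a _LazyModule that defers the heavy torch-dependent imports.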
_UpperCAmelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 1 |
import argparse
import json
import subprocess
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : int ):
'''simple docstring'''
A_ : Dict = []
A_ : List[Any] = (
f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
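# Shell out to curl against the GitHub Actions runners API; the token must carry the actions:read permission (see the argparse help below).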
A_ : Union[str, Any] = subprocess.run(__lowercase ,shell=__lowercase ,stdout=subprocess.PIPE )
A_ : List[str] = output.stdout.decode('utf-8' )
A_ : Optional[Any] = json.loads(__lowercase )
A_ : Dict = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__lowercase )
# save the result so we can report them on Slack
with open('offline_runners.txt' ,'w' ) as fp:
fp.write(json.dumps(__lowercase ) )
if len(__lowercase ) > 0:
A_ : Union[str, Any] = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(f'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
return values.split(',' )
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
_UpperCAmelCase = parser.parse_args()
get_runner_status(args.target_runners, args.token)
def odd_even_transposition ( arr : list ):
'''simple docstring'''
# Brick sort: alternate comparison passes over even- and odd-indexed pairs;
# n passes over n elements are enough to guarantee a sorted list.
arr_size = len(arr )
for _ in range(arr_size ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
arr[i + 1], arr[i] = arr[i], arr[i + 1]
return arr
if __name__ == "__main__":
arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = RoFormerTokenizer
lowerCamelCase_ = RoFormerTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **lowercase )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = '永和服装饰品有限公司,今天天气非常好'
A_ : Any = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.get_tokenizer()
A_ , A_ : List[str] = self.get_chinese_input_output_texts()
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , output_text.split() )
A_ : Optional[int] = tokens + [tokenizer.unk_token]
A_ : Optional[Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.get_rust_tokenizer()
A_ , A_ : Any = self.get_chinese_input_output_texts()
A_ : str = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , output_text.split() )
A_ : Union[str, Any] = tokens + [tokenizer.unk_token]
A_ : List[str] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
| 70 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''wavlm'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : List[Any] = hidden_size
A_ : Tuple = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Optional[Any] = list(lowercase )
A_ : Union[str, Any] = list(lowercase )
A_ : List[str] = list(lowercase )
A_ : str = conv_bias
A_ : Tuple = num_buckets
A_ : Union[str, Any] = max_bucket_distance
A_ : int = num_conv_pos_embeddings
A_ : str = num_conv_pos_embedding_groups
A_ : str = len(self.conv_dim )
A_ : Tuple = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : str = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Optional[int] = feat_proj_dropout
A_ : List[Any] = final_dropout
A_ : Union[str, Any] = layerdrop
A_ : Dict = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : str = num_ctc_classes
A_ : Any = vocab_size
A_ : str = do_stable_layer_norm
A_ : int = use_weighted_layer_sum
A_ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : List[str] = apply_spec_augment
A_ : Optional[Any] = mask_time_prob
A_ : int = mask_time_length
A_ : Any = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ : int = num_codevectors_per_group
A_ : Any = num_codevector_groups
A_ : List[Any] = contrastive_logits_temperature
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : int = proj_codevector_dim
A_ : int = diversity_loss_weight
# ctc loss
A_ : Union[str, Any] = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# adapter
A_ : int = add_adapter
A_ : Optional[Any] = adapter_kernel_size
A_ : Optional[int] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = list(lowercase )
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Dict = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 70 | 1 |
def UpperCamelCase ( column_title : str ):
'''simple docstring'''
# Converts an Excel-style column title to its column number, e.g. "AB" -> 1 * 26 + 2 = 28.
assert column_title.isupper()
answer = 0
index = len(column_title ) - 1
power = 0
while index >= 0:
value = (ord(column_title[index] ) - 64) * pow(26 ,power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 | import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase )
else:
A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase )
if hidden_sizes == 1_92:
A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase )
if hidden_sizes == 2_56:
A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase )
if hidden_sizes == 3_84:
A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase )
from_model.eval()
A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval()
huggingface_weights = OrderedDict()
weights = from_model.state_dict()
og_keys = list(from_model.state_dict().keys() )
new_keys = list(our_model.state_dict().keys() )
print(len(og_keys ) ,len(new_keys ) )
# the two architectures enumerate parameters in the same order, so map keys positionally
for i in range(len(og_keys ) ):
huggingface_weights[new_keys[i]] = weights[og_keys[i]]
our_model.load_state_dict(huggingface_weights )
A_ : str = torch.randn((2, 3, 2_24, 2_24) )
A_ : str = from_model(__lowercase )
A_ : Optional[Any] = our_model(__lowercase ).logits
assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one."
A_ : List[str] = name
print(__lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
A_ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ):
'''simple docstring'''
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : Optional[int] = 10_00
A_ : Optional[int] = (1, num_labels)
A_ : int = 'huggingface/label-files'
A_ : int = num_labels
A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
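# Every LeViT variant shares the ImageNet label mappings, so they are bound once with functools.partial and only the architecture hyper-parameters vary below.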
A_ : Any = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
A_ : Tuple = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 70 | 1 |
from random import randint, random
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : int ,__lowercase : bool = False ,__lowercase : bool = False ,__lowercase : int = 5 ,):
'''simple docstring'''
A_ : List[Any] = [[-1] * number_of_cells] # Create a highway without any car
A_ : Optional[Any] = 0
A_ : int = max(__lowercase ,0 )
while i < number_of_cells:
A_ : Any = (
randint(0 ,__lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 ,max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCamelCase ( __lowercase : list ,__lowercase : int ):
'''simple docstring'''
A_ : Union[str, Any] = 0
A_ : str = highway_now[car_index + 1 :]
for cell in range(len(__lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(__lowercase ,-1 )
def UpperCamelCase ( __lowercase : list ,__lowercase : float ,__lowercase : int ):
'''simple docstring'''
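# One Nagel-Schreckenberg step per car: accelerate by 1 (capped at max_speed), brake down to the gap to the car ahead, then randomly slow down by 1 with the given probability.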
A_ : Any = len(__lowercase )
# Before calculations, the highway is empty
A_ : Tuple = [-1] * number_of_cells
for car_index in range(__lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
A_ : str = min(highway_now[car_index] + 1 ,__lowercase )
# Number of empty cell before the next car
A_ : Optional[Any] = get_distance(__lowercase ,__lowercase ) - 1
# We can't have the car causing an accident
A_ : Union[str, Any] = min(next_highway[car_index] ,__lowercase )
if random() < probability:
# Randomly, a driver will slow down
A_ : int = max(next_highway[car_index] - 1 ,0 )
return next_highway
def UpperCamelCase ( __lowercase : list ,__lowercase : int ,__lowercase : float ,__lowercase : int ):
'''simple docstring'''
A_ : Dict = len(highway[0] )
for i in range(__lowercase ):
A_ : Any = update(highway[i] ,__lowercase ,__lowercase )
A_ : List[Any] = [-1] * number_of_cells
for car_index in range(__lowercase ):
A_ : List[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
A_ : Any = (car_index + speed) % number_of_cells
# Commit the change of position
A_ : Union[str, Any] = speed
highway.append(__lowercase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | def UpperCamelCase ( __lowercase : str ,__lowercase : int ):
'''simple docstring'''
A_ : int = word.split()
def justify(__lowercase : list ,__lowercase : int ,__lowercase : int ) -> str:
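# Distributes the leftover spaces of a line across its word gaps, extra spaces going to the leftmost gaps; e.g. justify(['This', 'is'], width=6, max_width=16) returns 'This' + 10 spaces + 'is'.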
A_ : Optional[Any] = max_width - width
A_ : Union[str, Any] = len(__lowercase )
if len(__lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
A_ : Dict = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
A_ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A_ : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
A_ : Tuple = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
A_ : List[str] = []
A_ : list[str] = []
A_ : Dict = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase ,__lowercase ,__lowercase ) )
# reset new line and new width
A_ , A_ : Any = [word], len(__lowercase )
A_ : int = max_width - width - len(__lowercase )
answer.append(' '.join(__lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCAmelCase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCAmelCase = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_UpperCAmelCase = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCAmelCase = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCAmelCase = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
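# Each entry is a (pipeline tag, model-mapping constant, auto class) triple; the TF and Flax variants are derived later by prefixing these names.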
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
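# Splits a CamelCase name at lower-to-upper and acronym boundaries, e.g. 'TFBertModel' -> ['TF', 'Bert', 'Model'].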
A_ : Any = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' ,__lowercase )
return [m.group(0 ) for m in matches]
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A_ : Dict = {
config.replace('Config' ,'' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
A_ : List[Any] = collections.defaultdict(__lowercase )
A_ : Optional[int] = collections.defaultdict(__lowercase )
A_ : Tuple = collections.defaultdict(__lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__lowercase ):
A_ : Tuple = None
if _re_tf_models.match(__lowercase ) is not None:
A_ : List[str] = tf_models
A_ : Any = _re_tf_models.match(__lowercase ).groups()[0]
elif _re_flax_models.match(__lowercase ) is not None:
A_ : Tuple = flax_models
A_ : int = _re_flax_models.match(__lowercase ).groups()[0]
elif _re_pt_models.match(__lowercase ) is not None:
A_ : Tuple = pt_models
A_ : Tuple = _re_pt_models.match(__lowercase ).groups()[0]
if lookup_dict is not None:
while len(__lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
A_ : Any = True
break
# Try again after removing the last word in the name
A_ : Dict = ''.join(camel_case_split(__lowercase )[:-1] )
A_ : Any = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
A_ : Tuple = list(__lowercase )
all_models.sort()
A_ : str = {'model_type': all_models}
A_ : Optional[int] = [pt_models[t] for t in all_models]
A_ : str = [tf_models[t] for t in all_models]
A_ : List[Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
A_ : List[Any] = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
A_ : List[Any] = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
A_ : Optional[int] = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
A_ : List[str] = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
A_ : Dict = 'AutoTokenizer'
A_ : List[Any] = [processors[t] for t in all_models]
return pd.DataFrame(__lowercase )
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : Tuple = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
A_ : Any = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}''']
A_ : Optional[Any] = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(__lowercase ,__lowercase ,__lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(__lowercase ,__lowercase ):
continue
# First extract all model_names
A_ : Optional[int] = []
for name in getattr(__lowercase ,__lowercase ).values():
if isinstance(__lowercase ,__lowercase ):
model_names.append(__lowercase )
else:
model_names.extend(list(__lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : Any = get_frameworks_table()
A_ : Any = Dataset.from_pandas(__lowercase )
A_ : Any = hf_hub_download(
'huggingface/transformers-metadata' ,'pipeline_tags.json' ,repo_type='dataset' ,token=__lowercase )
A_ : Dict = Dataset.from_json(__lowercase )
A_ : Any = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(__lowercase ) )
}
A_ : str = update_pipeline_and_auto_class_table(__lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
A_ : Optional[int] = sorted(table.keys() )
A_ : Union[str, Any] = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
A_ : Any = Dataset.from_pandas(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__lowercase ,'frameworks.json' ) )
tags_dataset.to_json(os.path.join(__lowercase ,'pipeline_tags.json' ) )
if commit_sha is not None:
A_ : Optional[Any] = (
f'''Update with commit {commit_sha}\n\nSee: '''
f'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
A_ : Optional[int] = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' ,folder_path=__lowercase ,repo_type='dataset' ,token=__lowercase ,commit_message=__lowercase ,)
def UpperCamelCase ( ):
'''simple docstring'''
A_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
A_ : List[Any] = transformers_module.pipelines.SUPPORTED_TASKS
A_ : Optional[int] = []
for key in pipeline_tasks:
if key not in in_table:
A_ : str = pipeline_tasks[key]['pt']
if isinstance(__lowercase ,(list, tuple) ):
A_ : Dict = model[0]
A_ : Optional[int] = model.__name__
if model not in in_table.values():
missing.append(__lowercase )
if len(__lowercase ) > 0:
A_ : Any = ', '.join(__lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
_UpperCAmelCase = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 70 | import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
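# T5 shifts the labels right through its own helper; BART-style models use shift_tokens_right, so the decoder consumes the shifted sequence while the loss still targets the raw labels.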
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total target sequence length for validation after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total target sequence length for the test set after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='Task to fine-tune on: summarization or translation.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will affect it.'
) , )
return parser
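# A minimal, self-contained sketch of the `label_smoothed_nll_loss` helper
# invoked in `_step` above when label smoothing is enabled. The fairseq-style
# formulation below is an assumption about the upstream utility, not a copy
# of it.
import torch

def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """Mix the per-token NLL with a uniform prior over the vocabulary."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # sum over the full vocab
    pad_mask = target.eq(ignore_index)                # zero out padding positions
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss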
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
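# Example invocation (paths and flag values are illustrative; --data_dir and
# --output_dir come from add_generic_args, which is defined outside this
# snippet):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out --gpus 1 \
#       --max_source_length 1024 --max_target_length 56 --do_predict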
| 70 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = value
A_ : Node | None = None
A_ : Node | None = None
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
A_ : str = tree
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 70 | def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = 0
while b > 0:
if b & 1:
A_ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
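# A self-contained restatement of the two routines above (the anonymized
# `A_` assignments stand in for the accumulator `res`): binary double-and-add
# multiplication, plus the modular variant that keeps every partial sum
# below `c`.
def _double_and_add_mod(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:                 # this bit contributes a * 2**k
            res = ((res % c) + (a % c)) % c
        a += a                    # double a for the next bit of b
        b >>= 1
    return res

assert _double_and_add_mod(7, 13, 5) == (7 * 13) % 5  # 91 % 5 == 1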
| 70 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 70 | def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if __lowercase <= 0 or not isinstance(__lowercase , int ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
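# Closed form used above: h(n) = n * (2 * n - 1); with n starting at 0,
# length=5 yields [0, 1, 6, 15, 28].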
| 70 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = BertTokenizer
lowerCamelCase_ = BertTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = filter_non_english
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = 'UNwant\u00E9d,running'
A_ : Optional[int] = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.tokenizer_class(self.vocab_file )
A_ : int = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ : List[str] = self.get_tokenizer()
A_ : Optional[Any] = self.get_rust_tokenizer()
A_ : Optional[Any] = 'UNwant\u00E9d,running'
A_ : str = tokenizer.tokenize(lowercase )
A_ : Dict = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Tuple = self.get_rust_tokenizer()
A_ : Optional[int] = tokenizer.encode(lowercase )
A_ : Any = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
# With lower casing
A_ : Union[str, Any] = self.get_tokenizer(do_lower_case=lowercase )
A_ : Tuple = self.get_rust_tokenizer(do_lower_case=lowercase )
A_ : List[Any] = 'UNwant\u00E9d,running'
A_ : Optional[int] = tokenizer.tokenize(lowercase )
A_ : Union[str, Any] = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : List[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : str = self.get_rust_tokenizer()
A_ : Tuple = tokenizer.encode(lowercase )
A_ : Union[str, Any] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = BasicTokenizer()
A_ : int = 'a\n\'ll !!to?\'d of, can\'t.'
A_ : Any = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A_ : Optional[Any] = {}
for i, token in enumerate(lowercase ):
A_ : Dict = i
A_ : Any = WordpieceTokenizer(vocab=lowercase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.get_tokenizer()
A_ : Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : int = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : int = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
A_ : Union[str, Any] = tokenizer_r.encode_plus(
lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , )
A_ : List[Any] = tokenizer_r.do_lower_case if hasattr(lowercase , 'do_lower_case' ) else False
A_ : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = ['的', '人', '有']
A_ : int = ''.join(lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : int = True
A_ : List[str] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Any = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
A_ : int = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
A_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowercase )
A_ : Any = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : str = False
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : List[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Union[str, Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
A_ : Tuple = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
A_ : Any = tokenizer_r.convert_ids_to_tokens(lowercase )
A_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ : Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowercase )
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
| 70 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
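# Sanity check on the fixture: the eight expected MST edges weigh
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37 in total for this 9-node graph.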
| 70 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
_UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = RealmTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
A_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars
):
A_ : Optional[int] = getattr(lowercase , normalizer_state.pop('type' ) )
A_ : Optional[Any] = do_lower_case
A_ : Union[str, Any] = strip_accents
A_ : Optional[int] = tokenize_chinese_chars
A_ : Union[str, Any] = normalizer_class(**lowercase )
A_ : Tuple = do_lower_case
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
A_ : Optional[int] = PaddingStrategy.MAX_LENGTH
A_ : Optional[int] = text
A_ : int = kwargs.pop('text_pair' , lowercase )
A_ : List[Any] = kwargs.pop('return_tensors' , lowercase )
A_ : Any = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(lowercase ):
if batch_text_pair is not None:
A_ : Union[str, Any] = batch_text_pair[idx]
else:
A_ : List[str] = None
A_ : Dict = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase )
A_ : Tuple = encoded_candidates.get('input_ids' )
A_ : Dict = encoded_candidates.get('attention_mask' )
A_ : str = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowercase )
A_ : List[Any] = {key: item for key, item in output_data.items() if len(lowercase ) != 0}
return BatchEncoding(lowercase , tensor_type=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase=None ):
"""simple docstring"""
A_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : str = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
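# Hypothetical usage of the candidate-batching __call__ above (the anonymized
# method corresponds to `batch_encode_candidates` in the REALM tokenizer; the
# checkpoint id and keyword arguments are assumptions):
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#   )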
| 70 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
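# Example shell invocations dispatched through the sub-parsers registered
# above (the training script name and its flags are illustrative):
#   accelerate config   # interactive setup
#   accelerate env      # print environment info for bug reports
#   accelerate launch train.py --epochs 3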
| 70 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : str = WATERMARK_BITS
A_ : Dict = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if images.shape[-1] < 2_5_6:
return images
A_ : str = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A_ : Optional[Any] = [self.encoder.encode(lowercase , 'dwtDct' ) for image in images]
A_ : Optional[int] = torch.from_numpy(np.array(lowercase ) ).permute(0 , 3 , 1 , 2 )
A_ : Optional[int] = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
return images
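# Hypothetical decode path for the watermark embedded above; the decoder API
# and the 48-bit message length (inferred from WATERMARK_MESSAGE) are
# assumptions about imwatermark:
#   from imwatermark import WatermarkDecoder
#   decoder = WatermarkDecoder("bits", 48)
#   bits = decoder.decode(bgr_image, "dwtDct")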
| 70 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_UpperCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
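# The loop above factors num - 1 as 2**t * s with s odd; testing five random
# witnesses bounds the chance that a composite passes at (1/4)**5.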
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def UpperCamelCase ( __lowercase : Optional[int]=None ,__lowercase : Optional[int]=None ):
'''simple docstring'''
return field(default_factory=lambda: default ,metadata=__lowercase )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = field(
metadata={'''help''': '''The csv file to plot.'''} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
lowerCamelCase_ = field(
default=__A , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
lowerCamelCase_ = field(
default=__A , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
lowerCamelCase_ = list_field(
default=__A , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def UpperCamelCase ( __lowercase : Dict ):
'''simple docstring'''
try:
int(__lowercase )
return True
except ValueError:
return False
def UpperCamelCase ( __lowercase : Optional[int] ):
'''simple docstring'''
try:
float(__lowercase )
return True
except ValueError:
return False
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = args
A_ : Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
A_ : int = csv.DictReader(lowercase )
for row in reader:
A_ : Optional[int] = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
A_ : Optional[Any] = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
A_ : List[Any] = float(row['result'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Tuple = plt.subplots()
A_ : Tuple = 'Time usage' if self.args.is_time else 'Memory usage'
A_ : Any = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
A_ : Any = sorted(set(self.result_dict[model_name]['bsz'] ) )
A_ : int = sorted(set(self.result_dict[model_name]['seq_len'] ) )
A_ : Optional[Any] = self.result_dict[model_name]['result']
((A_) , (A_)) : List[str] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
A_ : Any = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
A_ : int = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowercase , )
else:
A_ : List[str] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((A_) , (A_)) : Union[str, Any] = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
A_ : str = np.asarray(lowercase , lowercase )[: len(lowercase )]
plt.scatter(
lowercase , lowercase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(lowercase , lowercase , '--' )
title_str += F''' {label_model_name} vs.'''
A_ : Any = title_str[:-4]
A_ : Any = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowercase )
plt.xlabel(lowercase )
plt.ylabel(lowercase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Any = HfArgumentParser(__lowercase )
A_ : Tuple = parser.parse_args_into_dataclasses()[0]
A_ : Optional[int] = Plot(args=__lowercase )
plot.plot()
if __name__ == "__main__":
main()
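# Example invocation (HfArgumentParser maps the dataclass fields above to
# CLI flags; the file names are illustrative):
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png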
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_UpperCAmelCase = ["""gpt2"""]
_UpperCAmelCase = """gpt2"""
if is_tf_available():
class UpperCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
super().__init__()
A_ : Tuple = tokenizer
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
A_ : List[str] = TFGPTaLMHeadModel.from_config(lowercase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.tokenizer(lowercase )
A_ : int = tokenized['input_ids'].to_tensor()
A_ : Dict = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
A_ : List[Any] = self.model(input_ids=lowercase , attention_mask=lowercase )['logits']
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : List[str] = [GPTaTokenizer.from_pretrained(lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
A_ : Union[str, Any] = [TFGPTaTokenizer.from_pretrained(lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
A_ : List[str] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
A_ : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
A_ : str = tokenizer([test_inputs] , return_tensors='tf' )
A_ : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
A_ : Dict = python_outputs[key].numpy()
A_ : List[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase , tf.intaa ) == tf_outputs_values ) )
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
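# Once the saved-model test above has exported an artifact, consuming it from
# another process looks roughly like this. The default path is illustrative;
# 'output_0' is the signature output key asserted in the test.
def example_load_saved_model(path='/tmp/saved.model'):
    loaded = tf.saved_model.load(path)
    serve = loaded.signatures['serving_default']
    return serve(tf.constant(['Hello world']))['output_0']  # tokenization runs inside the graph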
| 70 | import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 70 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = StableDiffusionLDMaDPipeline
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
A_ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
A_ : Optional[Any] = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : List[Any] = torch.manual_seed(lowercase )
else:
A_ : int = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Dict = self.get_dummy_components()
A_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowercase )
A_ : str = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : List[str] = ldmad_pipe(**lowercase )
A_ , A_ : List[Any] = output.rgb, output.depth
A_ : int = rgb[0, -3:, -3:, -1]
A_ : Dict = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
A_ : Any = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
A_ : Dict = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.get_dummy_components()
A_ : Any = StableDiffusionLDMaDPipeline(**lowercase )
A_ : Optional[int] = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[Any] = self.get_dummy_inputs(lowercase )
A_ : Optional[int] = 3 * [inputs['prompt']]
# forward
A_ : int = ldmad_pipe(**lowercase )
A_ , A_ : List[str] = output.rgb, output.depth
A_ : str = rgb_slice_a[0, -3:, -3:, -1]
A_ : Union[str, Any] = depth_slice_a[0, -3:, -1]
A_ : Union[str, Any] = self.get_dummy_inputs(lowercase )
A_ : List[Any] = 3 * [inputs.pop('prompt' )]
A_ : int = ldmad_pipe.tokenizer(
lowercase , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
A_ : int = text_inputs['input_ids'].to(lowercase )
A_ : Optional[Any] = ldmad_pipe.text_encoder(lowercase )[0]
A_ : int = prompt_embeds
# forward
A_ : str = ldmad_pipe(**lowercase )
A_ , A_ : List[str] = output.rgb, output.depth
A_ : Optional[int] = rgb_slice_a[0, -3:, -3:, -1]
A_ : List[str] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Optional[int] = self.get_dummy_components()
A_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowercase )
A_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowercase )
A_ : Optional[Any] = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : Dict = self.get_dummy_inputs(lowercase )
A_ : Any = 'french fries'
A_ : Optional[int] = ldmad_pipe(**lowercase , negative_prompt=lowercase )
A_ , A_ : Any = output.rgb, output.depth
A_ : str = rgb[0, -3:, -3:, -1]
A_ : List[str] = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
A_ : List[str] = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
A_ : str = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0 ):
"""simple docstring"""
A_ : int = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = np.random.RandomState(lowercase ).standard_normal((1, 4, 6_4, 6_4) )
A_ : int = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
A_ : Tuple = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
A_ : List[Any] = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : Tuple = self.get_inputs(lowercase )
A_ : int = ldmad_pipe(**lowercase )
A_ , A_ : Union[str, Any] = output.rgb, output.depth
A_ : Dict = rgb[0, -3:, -3:, -1].flatten()
A_ : int = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
A_ : List[str] = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
A_ : Any = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0 ):
"""simple docstring"""
A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Optional[Any] = np.random.RandomState(lowercase ).standard_normal((1, 4, 6_4, 6_4) )
A_ : Optional[int] = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
A_ : Tuple = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 5_0,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : List[Any] = self.get_inputs(lowercase )
A_ : int = ldmad_pipe(**lowercase )
A_ , A_ : Any = output.rgb, output.depth
A_ : str = 0.49_5586
A_ : List[Any] = 0.3379_5515
A_ : Optional[Any] = 112.4_8518
A_ : int = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : List[Any] = self.get_inputs(lowercase )
A_ : int = ldmad_pipe(**lowercase )
A_ , A_ : Tuple = output.rgb, output.depth
A_ : List[str] = 0.419_4127
A_ : int = 0.3537_5586
A_ : int = 0.563_8502
A_ : Optional[Any] = 0.3468_6103
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
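# Stripped of the test harness, typical use of the LDM3D pipeline is short; the
# checkpoint name comes from the slow tests above and the prompt is illustrative.
def example_ldmad_usage():
    pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d')
    output = pipe('a photograph of an astronaut riding a horse', num_inference_steps=50, output_type='numpy')
    return output.rgb, output.depth  # one RGB image plus an aligned depth map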
| 70 | import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class UpperCAmelCase:
    """Bifid-style cipher over a 5x5 Polybius square (with 'j' folded into 'i')."""

    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        # 1-based (row, column) position of `letter` in the square
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message):
        message = message.lower()
        message = message.replace(' ', '')
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
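# A round-trip check for the cipher above (the class name is kept exactly as it
# appears in this file):
example_cipher = UpperCAmelCase()
assert example_cipher.decode(example_cipher.encode('testmessage')) == 'testmessage'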
| 70 | 1 |
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase(DiffusionPipeline):
    """Minimal unconditional image pipeline: iteratively denoises Gaussian noise with a UNet."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 5_0,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image), "This is a local test"
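# A hypothetical driver for the pipeline above. Any unconditional UNet/scheduler
# pair should do; the checkpoint name is an assumption, and note that __call__
# deliberately returns an extra marker string alongside the images.
def example_run_pipeline():
    from diffusers import DDIMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained('google/ddpm-cat-256')  # assumed checkpoint
    pipe = UpperCAmelCase(unet=unet, scheduler=DDIMScheduler())
    result, marker = pipe(batch_size=1, num_inference_steps=50)
    return result.images, marker  # list of PIL images, plus "This is a local test"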
| 70 | from math import sqrt
def solution(limit: int = 1_00_00_00) -> int:
    """
    Return the least cuboid size M such that more than `limit` cuboids with an
    integer shortest surface path fit inside an M x M x M box (Project Euler 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
_UpperCAmelCase = logging.getLogger(__name__)
def main():
    """Preprocess a raw text dump once (tokenization + token_to_ids) and pickle the result."""
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).'
    )
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.')
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.')
    args = parser.parse_args()
    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == 'gpt2':
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()
    logger.info('Start encoding')
    logger.info(f'{len(data)} examples to process.')
    rslt = []
    iter = 0
    interval = 1_00_00
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl')
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f'{len(rslt)} examples processed.')
    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uintaa(d) for d in rslt]
    else:
        rslt_ = [np.intaa(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
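# The dtype switch near the end is purely a storage optimisation: token ids fit
# in two bytes whenever the vocabulary has fewer than 2**16 entries (`uintaa` /
# `intaa` above correspond to NumPy's 16- and 32-bit integer dtypes). In plain form:
def example_pack_ids(token_ids, vocab_size):
    # 16-bit storage is enough when ids stay below 2**16; otherwise fall back to 32-bit.
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return np.array(token_ids, dtype=dtype)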
| 70 | import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup on top of a wrapped learning-rate decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.floataa)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.floataa)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
    def get_config(self):
        """Return the schedule configuration (used by Keras serialization)."""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCamelCase(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning-rate warmup followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay (the decay is applied to weights, not gradients)."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1E-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
                use_locking=self._use_locking)
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across steps/replicas until explicitly reset."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.intaa), trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Adds this step's `gradients` to the running totals."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(F'''Expected {len(self._gradients)} gradients, but got {len(gradients)}''')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
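# A minimal sketch of how these pieces fit together (step counts are arbitrary):
# build the warmup + decay schedule through the factory, then, inside a training
# loop, feed gradients to the accumulator and apply them every `k` steps with
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()
example_optimizer, example_lr_schedule = UpperCamelCase(
    init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
)
example_accumulator = GradientAccumulator()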
| 70 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
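# Example run on the graphs above: E -> F costs 3 via E -> G -> F (2 + 1),
# which beats E -> B -> C -> D -> F (cost 4).
assert bidirectional_dij('E', 'F', graph_fwd, graph_bwd) == 3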
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)
            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)
            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)
            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)
            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)
            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4_4_1_0)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4_4_1_0)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4_4_1_0)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4_4_1_0)
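# Outside the test harness, the cross-framework round trip these cases exercise
# is a one-liner in each direction (`from_pt`/`from_tf` convert weights on the fly):
def example_cross_framework_loading(model_name='bert-base-uncased'):
    tf_model = TFAutoModel.from_pretrained(model_name, from_pt=True)  # PyTorch weights -> TF model
    pt_model = AutoModel.from_pretrained(model_name, from_tf=True)    # TF weights -> PyTorch model
    return tf_model, pt_model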
| 70 | 1 |
import sys
_UpperCAmelCase = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n` (Project Euler 8)."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | def UpperCamelCase ( arr ):
    """Return the minimum difference between the sums of a two-way partition of `arr`."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
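# Example: [1, 6, 11, 5] (total 23) splits into {1, 5, 6} and {11}, so the
# smallest achievable difference between the two subset sums is 1.
assert UpperCamelCase([1, 6, 11, 5]) == 1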
| 70 | 1 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers n * (2n - 1), starting from n = 0."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
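# Because the range starts at n = 0, the sequence is the classical hexagonal
# numbers prefixed with 0:
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]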
| 70 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_gelu_versions(self):
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0])
        torch_builtin = get_activation('gelu')
        geluaa = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)
        clipped_mask = torch.where(y_gelu_aa < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))
    def test_get_activation(self):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
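# The assertFalse in the first test holds because `gelu` is the exact erf-based
# definition while `gelu_new` is the tanh approximation; the two agree only
# approximately. The standard formulas (not taken from this file) are:
import math


def gelu_exact(x: float) -> float:
    # GELU(x) = x * Phi(x), with Phi the standard normal CDF
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))


def gelu_tanh_approx(x: float) -> float:
    # Hendrycks & Gimpel's tanh approximation, used by `gelu_new`
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))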
| 70 | 1 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list of integers `a` in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(n) for n in a))
if __name__ == "__main__":
main()
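# Pigeonhole sort runs in O(n + range) time and O(range) extra space, so it pays
# off exactly when the value range is comparable to the input length; negatives
# are fine because offsets are taken from the minimum.
example_data = [3, -1, 0, 7, 3]
pigeonhole_sort(example_data)
assert example_data == [-1, 0, 3, 3, 7]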
| 70 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.convolution(self.padding(lowercase ) )
A_ : List[str] = self.normalization(lowercase )
A_ : List[Any] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[int] = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.pooler(lowercase )
for layer_module in self.attention:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : Optional[int] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : int = layer_module(lowercase )
A_ : Union[str, Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Dict = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : int = max(1 , out_channels // config.groups_width )
A_ : Optional[int] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : List[str] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = hidden_state
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Dict = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[Any] = config
A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : str = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase )
A_ : Optional[int] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Dict = encoder_outputs[0]
A_ : List[Any] = self.pooler(lowercase )
# Change to NCHW output format have uniformity in the modules
A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
        [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : int = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : List[Any] = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : List[Any] = self.classifier[0](lowercase )
A_ : Union[str, Any] = self.classifier[1](lowercase )
A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
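# A minimal usage sketch for the classes above, assuming they correspond to
# transformers' TFRegNetForImageClassification and that the
# "facebook/regnet-y-040" checkpoint pairs with AutoImageProcessor; both names
# are assumptions, and the random array stands in for a real image.
import numpy as np
from transformers import AutoImageProcessor, TFRegNetForImageClassification

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # stand-in image
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(image, return_tensors="tf")  # pixel_values arrive in NCHW
logits = model(**inputs).logits
print(model.config.id2label[int(np.argmax(logits, axis=-1)[0])])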
| 70 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
_UpperCAmelCase = logging.getLogger(__name__)
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : str = git.Repo(search_parent_directories=__lowercase )
A_ : int = {
'repo_id': str(__lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(__lowercase ,'git_log.json' ) ,'w' ) as f:
json.dump(__lowercase ,__lowercase ,indent=4 )
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
if params.n_gpu <= 0:
A_ : Any = 0
A_ : List[Any] = -1
A_ : int = True
A_ : List[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
A_ : Optional[int] = int(os.environ['WORLD_SIZE'] )
A_ : Optional[Any] = int(os.environ['N_GPU_NODE'] )
A_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
A_ : Dict = params.world_size // params.n_gpu_per_node
A_ : str = params.global_rank // params.n_gpu_per_node
A_ : Union[str, Any] = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
A_ : int = 1
A_ : Optional[Any] = 0
A_ : Any = 0
A_ : List[Any] = 0
A_ : int = 1
A_ : Optional[Any] = 1
A_ : Optional[Any] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
A_ : Optional[Any] = params.node_id == 0 and params.local_rank == 0
A_ : int = params.n_nodes > 1
# summary
A_ : Tuple = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' ,backend='nccl' ,)
def UpperCamelCase ( __lowercase : List[Any] ):
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
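# A minimal sketch of the env:// rendezvous the GPU-initialization function
# above relies on: with init_method="env://", torch.distributed reads
# MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE from the environment. The
# single-process values and the "gloo" backend are placeholders for a quick
# local test; real multi-GPU runs would use "nccl" and a launcher that sets
# these variables.
import os
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
os.environ.setdefault("RANK", "0")
os.environ.setdefault("WORLD_SIZE", "1")
dist.init_process_group(backend="gloo", init_method="env://")
print(dist.get_rank(), dist.get_world_size())  # 0 1
dist.destroy_process_group()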
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
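# The __init__ above defers heavy imports through _LazyModule. A generic sketch
# of the same idea using only the standard library (PEP 562 module-level
# __getattr__); it is meant to live in a package __init__.py, and the submodule
# and class names here are illustrative, not BioGpt's.
import importlib

_import_structure = {"modeling": ["BigModel"], "configuration": ["BigConfig"]}

def __getattr__(name):
    for submodule, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")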
| 70 | 1 |
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''bert-generation'''
def __init__( self , lowercase=5_0_3_5_8 , lowercase=1_0_2_4 , lowercase=2_4 , lowercase=1_6 , lowercase=4_0_9_6 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase=2 , lowercase=1 , lowercase="absolute" , lowercase=True , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : int = vocab_size
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Union[str, Any] = hidden_act
A_ : Union[str, Any] = intermediate_size
A_ : str = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : List[str] = initializer_range
A_ : Tuple = layer_norm_eps
A_ : int = position_embedding_type
A_ : Optional[int] = use_cache
| 70 | def UpperCamelCase ( __lowercase : list ):
'''simple docstring'''
A_ : str = len(__lowercase )
for _ in range(__lowercase ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
A_ , A_ : Optional[Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_UpperCAmelCase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''wavlm'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : List[Any] = hidden_size
A_ : Tuple = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Optional[Any] = list(lowercase )
A_ : Union[str, Any] = list(lowercase )
A_ : List[str] = list(lowercase )
A_ : str = conv_bias
A_ : Tuple = num_buckets
A_ : Union[str, Any] = max_bucket_distance
A_ : int = num_conv_pos_embeddings
A_ : str = num_conv_pos_embedding_groups
A_ : str = len(self.conv_dim )
A_ : Tuple = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : str = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Optional[int] = feat_proj_dropout
A_ : List[Any] = final_dropout
A_ : Union[str, Any] = layerdrop
A_ : Dict = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : str = num_ctc_classes
A_ : Any = vocab_size
A_ : str = do_stable_layer_norm
A_ : int = use_weighted_layer_sum
A_ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : List[str] = apply_spec_augment
A_ : Optional[Any] = mask_time_prob
A_ : int = mask_time_length
A_ : Any = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ : int = num_codevectors_per_group
A_ : Any = num_codevector_groups
A_ : List[Any] = contrastive_logits_temperature
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : int = proj_codevector_dim
A_ : int = diversity_loss_weight
# ctc loss
A_ : Union[str, Any] = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# adapter
A_ : int = add_adapter
A_ : Optional[Any] = adapter_kernel_size
A_ : Optional[int] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = list(lowercase )
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Dict = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
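# Worked example for the stride-product property above: with the default
# conv_stride=(5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the raw
# waveform by 5 * 2**6 = 320, i.e. one frame every 20 ms at 16 kHz.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320
print(16_000 / ratio)  # 50.0 frames per second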
| 70 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=3_0 , lowercase=4_0_0 , lowercase=True , lowercase=None , lowercase=True , lowercase=1 / 2_5_5 , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , ):
"""simple docstring"""
A_ : List[str] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
A_ : Union[str, Any] = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = min_resolution
A_ : Optional[int] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = size
A_ : Optional[Any] = do_rescale
A_ : Any = rescale_factor
A_ : Dict = do_normalize
A_ : Union[str, Any] = image_mean
A_ : Tuple = image_std
A_ : Optional[int] = do_pad
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self , lowercase , lowercase=False ):
"""simple docstring"""
if not batched:
A_ : List[Any] = image_inputs[0]
if isinstance(lowercase , Image.Image ):
A_ , A_ : List[str] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
if w < h:
A_ : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
A_ : Optional[int] = self.size['shortest_edge']
elif w > h:
A_ : Dict = self.size['shortest_edge']
A_ : List[Any] = int(self.size['shortest_edge'] * w / h )
else:
A_ : int = self.size['shortest_edge']
A_ : int = self.size['shortest_edge']
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Any = max(lowercase , key=lambda item : item[0] )[0]
A_ : Optional[int] = max(lowercase , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = DetrImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = DetrImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'image_mean' ) )
self.assertTrue(hasattr(lowercase , 'image_std' ) )
self.assertTrue(hasattr(lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase , 'do_rescale' ) )
self.assertTrue(hasattr(lowercase , 'rescale_factor' ) )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
self.assertTrue(hasattr(lowercase , 'do_pad' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowercase )
A_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ , A_ : Dict = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
A_ : List[str] = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ , A_ : List[Any] = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(lowercase , return_tensors='pt' ).pixel_values
A_ , A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ , A_ : Dict = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[str] = image_processing(lowercase , return_tensors='pt' ).pixel_values
A_ , A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A_ : List[Any] = json.loads(f.read() )
A_ : int = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
A_ : List[Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
A_ : Any = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt' )
# verify pixel values
A_ : Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
A_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
A_ : Optional[int] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
A_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
A_ : Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
A_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
A_ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify orig_size
A_ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
A_ : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A_ : int = json.loads(f.read() )
A_ : Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
A_ : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A_ : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
A_ : Optional[Any] = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt' )
# verify pixel values
A_ : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
A_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
A_ : str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
A_ : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
A_ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
A_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
A_ : Any = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify masks
A_ : str = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase )
# verify orig_size
A_ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
A_ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
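# A standalone sketch of the shortest-edge resize rule that get_expected_values
# above reproduces: scale so the short side hits shortest_edge, truncating the
# long side to an int as the tester does. The real processor additionally caps
# the long side at longest_edge, which this sketch omits.
def shortest_edge_size(h, w, shortest_edge=800):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(shortest_edge_size(480, 640))  # (800, 1066) -- the COCO shape in the slow tests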
| 70 | import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase )
else:
A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase )
if hidden_sizes == 1_92:
A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase )
if hidden_sizes == 2_56:
A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase )
if hidden_sizes == 3_84:
A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase )
from_model.eval()
A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval()
A_ : Union[str, Any] = OrderedDict()
A_ : Dict = from_model.state_dict()
A_ : Tuple = list(from_model.state_dict().keys() )
A_ : str = list(our_model.state_dict().keys() )
print(len(__lowercase ) ,len(__lowercase ) )
for i in range(len(__lowercase ) ):
A_ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowercase )
A_ : str = torch.randn((2, 3, 2_24, 2_24) )
A_ : str = from_model(__lowercase )
A_ : Optional[Any] = our_model(__lowercase ).logits
assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one."
A_ : List[str] = name
print(__lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
A_ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ):
'''simple docstring'''
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : Optional[int] = 10_00
A_ : Optional[int] = (1, num_labels)
A_ : int = 'huggingface/label-files'
A_ : int = num_labels
A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(k ): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
A_ : Any = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
A_ : Tuple = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
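# The conversion loop above copies tensors positionally: key i of the timm
# state dict is assumed to line up with key i of the HF state dict (the
# printed key counts are there to eyeball-check that alignment). A hedged
# sketch of the same remapping as a standalone helper:
def remap_state_dict_by_position(src_state_dict, dst_keys):
    src_keys = list(src_state_dict.keys())
    assert len(src_keys) == len(dst_keys), "state dicts must align one-to-one"
    return {dst: src_state_dict[src] for src, dst in zip(src_keys, dst_keys)}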
| 70 | 1 |
def UpperCamelCase ( __lowercase : int = 50_00_00_00 ):
'''simple docstring'''
A_ : List[Any] = set()
A_ : int = int((limit - 24) ** (1 / 2) )
A_ : List[Any] = set(range(3 ,prime_square_limit + 1 ,2 ) )
primes.add(2 )
for p in range(3 ,prime_square_limit + 1 ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,__lowercase ) ) )
for primea in primes:
A_ : Any = primea * primea
for primea in primes:
A_ : Dict = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
A_ : List[Any] = primea * primea * primea * primea
A_ : int = square + cube + tetr
if total >= limit:
break
ret.add(__lowercase )
return len(__lowercase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | def UpperCamelCase ( __lowercase : str ,__lowercase : int ):
'''simple docstring'''
A_ : int = word.split()
def justify(__lowercase : list ,__lowercase : int ,__lowercase : int ) -> str:
A_ : Optional[Any] = max_width - width
A_ : Union[str, Any] = len(__lowercase )
if len(__lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
A_ : Dict = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
A_ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A_ : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
A_ : Tuple = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
A_ : List[str] = []
A_ : list[str] = []
A_ : Dict = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase ,__lowercase ,__lowercase ) )
# reset new line and new width
A_ , A_ : Any = [word], len(__lowercase )
A_ : int = max_width - width - len(__lowercase )
answer.append(' '.join(__lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
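# Classic worked example of the full-justification rule implemented above:
# extra spaces go round-robin to the leftmost gaps, and the last line is
# left-justified then padded. The name text_justification is an assumption
# about how the top-level function above is exposed.
result = text_justification("This is an example of text justification.", 16)
assert result == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]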
| 70 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCAmelCase :
'''simple docstring'''
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
raise NotImplementedError()
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = False , **lowercase ):
"""simple docstring"""
A_ : List[Any] = tokenizer
A_ : Optional[Any] = skip_prompt
A_ : Optional[int] = decode_kwargs
# variables used in the streaming process
A_ : Union[str, Any] = []
A_ : str = 0
A_ : Tuple = True
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('TextStreamer only supports batch size 1' )
elif len(value.shape ) > 1:
A_ : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
A_ : Dict = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
A_ : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('\n' ):
A_ : Tuple = text[self.print_len :]
A_ : List[Any] = []
A_ : int = 0
# If the last token is a CJK character, we print the characters.
elif len(lowercase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
A_ : List[str] = text[self.print_len :]
self.print_len += len(lowercase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
A_ : Optional[int] = text[self.print_len : text.rfind(' ' ) + 1]
self.print_len += len(lowercase )
self.on_finalized_text(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if len(self.token_cache ) > 0:
A_ : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
A_ : Optional[Any] = text[self.print_len :]
A_ : Dict = []
A_ : Optional[Any] = 0
else:
A_ : List[str] = ''
A_ : Tuple = True
self.on_finalized_text(lowercase , stream_end=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
print(lowercase , flush=lowercase , end='' if not stream_end else None )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)  # CJK Unified Ideographs
or (cp >= 0X3_400 and cp <= 0X4_dbf)  # CJK Unified Ideographs Extension A
or (cp >= 0X20_000 and cp <= 0X2a_6df)  # CJK Unified Ideographs Extension B
or (cp >= 0X2a_700 and cp <= 0X2b_73f)  # CJK Unified Ideographs Extension C
or (cp >= 0X2b_740 and cp <= 0X2b_81f)  # CJK Unified Ideographs Extension D
or (cp >= 0X2b_820 and cp <= 0X2c_eaf)  # CJK Unified Ideographs Extension E
or (cp >= 0Xf_900 and cp <= 0Xf_aff)  # CJK Compatibility Ideographs
or (cp >= 0X2f_800 and cp <= 0X2f_a1f)  # CJK Compatibility Ideographs Supplement
):
return True
return False
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = False , lowercase = None , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , lowercase , **lowercase )
A_ : Union[str, Any] = Queue()
A_ : str = None
A_ : Optional[int] = timeout
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
self.text_queue.put(lowercase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
"""simple docstring"""
return self
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
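# A hedged usage sketch for the TextIteratorStreamer above: generation runs in
# a background thread while the caller iterates over decoded text chunks. The
# gpt2 checkpoint and generation settings are illustrative placeholders.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for new_text in streamer:  # blocks until the stop signal is queued
    print(new_text, end="")
thread.join()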
| 70 | import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='Task to fine-tune on (e.g. summarization).' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
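# A minimal sketch (under stated assumptions, not the utils.label_smoothed_nll_loss
# imported above) of the label-smoothing branch in _step: blend the NLL of the
# gold token with the mean negative log-probability over the vocabulary, and
# zero out padding positions before summing.
import torch

def label_smoothed_nll(lprobs, target, epsilon, ignore_index):
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)  # equivalent to sum over vocab / vocab_size
    pad_mask = target.eq(ignore_index)
    nll = nll.masked_fill(pad_mask, 0.0)
    smooth = smooth.masked_fill(pad_mask, 0.0)
    loss = (1.0 - epsilon) * nll + epsilon * smooth
    return loss.sum(), nll.sum()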
| 70 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=3_0 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1_0 , lowercase=0.02 , lowercase=3 , lowercase=None , lowercase=2 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : int = batch_size
A_ : Optional[int] = image_size
A_ : Optional[Any] = patch_size
A_ : Optional[int] = num_channels
A_ : Dict = is_training
A_ : List[Any] = use_labels
A_ : List[Any] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Tuple = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Optional[int] = scope
A_ : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : str = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Tuple = TFDeiTModel(config=lowercase )
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : List[Any] = TFDeiTForMaskedImageModeling(config=lowercase )
A_ : str = model(lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : str = 1
A_ : str = TFDeiTForMaskedImageModeling(lowercase )
A_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Any = model(lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Optional[int] = self.type_sequence_label_size
A_ : Dict = TFDeiTForImageClassification(lowercase )
A_ : Tuple = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : List[str] = 1
A_ : Union[str, Any] = TFDeiTForImageClassification(lowercase )
A_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[Any] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Any = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase_ = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFDeiTModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=3_7 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Dense ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(lowercase )
A_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[Any] = [*signature.parameters.keys()]
A_ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=False ):
"""simple docstring"""
A_ : List[str] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = TFDeiTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
A_ : Union[str, Any] = self.default_image_processor
A_ : List[Any] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Optional[Any] = model(**lowercase )
# verify the logits
A_ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
| 70 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
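# Worked example (a sketch using the tester default image_size=32): the final
# stage downsamples by 32, so the spatial dims are 32 // 32 = 1 and the
# expected shape is (batch_size, hidden_sizes[-1], 1, 1).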
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 1 |
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : int ):
'''simple docstring'''
A_ : List[str] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
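# Worked check (a sketch, not part of the original snippet): the line above is
# the closed form S_n = n / 2 * (2 * a_1 + (n - 1) * d), so
# sum_of_series(1, 1, 10) = (10 / 2) * (2 + 9) = 55.0.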
def UpperCamelCase ( ):
'''simple docstring'''
print(sum_of_series(1 ,1 ,10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = 0
while b > 0:
if b & 1:
A_ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
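# Hedged note: both helpers are double-and-add (Russian peasant) routines, adding
# a shifted copy of a for every set bit of b; the second variant also reduces the
# running sum modulo c. Example: 7 * 13 (13 = 0b1101) accumulates 7 + 28 + 56 = 91.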
| 70 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = IFInpaintingPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : Any = torch.manual_seed(lowercase )
else:
A_ : Tuple = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 70 | def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if length <= 0 or not isinstance(__lowercase ,__lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
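# Worked example (a sketch): hexagonal numbers follow h(n) = n * (2 * n - 1),
# and since the range starts at n = 0 this returns [0, 1, 6, 15, 28] for length=5.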
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 70 | 1 |
from ....utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=None , lowercase=2_0_4_8 ):
"""simple docstring"""
A_ : Union[str, Any] = config.__dict__
A_ : Union[str, Any] = modal_hidden_size
if num_labels:
A_ : Any = num_labels
| 70 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjacency[nodea].append([nodea, cost] )
adjacency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 70 | 1 |
from collections.abc import Sequence
def UpperCamelCase ( __lowercase : Sequence[float] ,__lowercase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def UpperCamelCase ( __lowercase : Sequence[float] ,__lowercase : float ):
'''simple docstring'''
A_ : Any = 0.0
for coeff in reversed(__lowercase ):
A_ : int = result * x + coeff
return result
if __name__ == "__main__":
_UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
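# Worked check (a sketch with the constants above): both evaluators return
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0; Horner's scheme reaches it
# with one multiply-add per coefficient instead of recomputing powers of x.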
| 70 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
| 70 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = ArgumentParser('Transformers CLI tool' ,usage='transformers-cli <command> [<args>]' )
A_ : List[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(__lowercase )
DownloadCommand.register_subcommand(__lowercase )
EnvironmentCommand.register_subcommand(__lowercase )
RunCommand.register_subcommand(__lowercase )
ServeCommand.register_subcommand(__lowercase )
UserCommands.register_subcommand(__lowercase )
AddNewModelCommand.register_subcommand(__lowercase )
AddNewModelLikeCommand.register_subcommand(__lowercase )
LfsCommands.register_subcommand(__lowercase )
PTtoTFCommand.register_subcommand(__lowercase )
# Let's go
A_ : List[str] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
A_ : Dict = args.func(__lowercase )
service.run()
if __name__ == "__main__":
main()
| 70 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 1 |
def UpperCamelCase ( __lowercase : int ,__lowercase : int ):
'''simple docstring'''
while second != 0:
A_ : List[str] = first & second
first ^= second
A_ : str = c << 1
return first
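# Hedged note: in the de-obfuscated original the loop body is
#     c = first & second; first ^= second; second = c << 1
# i.e. AND extracts the carry bits, XOR adds without carry, and the shift
# propagates carries. Trace for add(5, 3): (5, 3) -> (6, 2) -> (4, 4) -> (0, 8)
# -> (8, 0), returning 8.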
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = int(input("""Enter the first number: """).strip())
_UpperCAmelCase = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 70 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
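# Hedged summary of the Miller-Rabin round above: num - 1 is written as s * 2**t
# with s odd; for a random base a, the sequence a**s, a**(2s), ... (mod num)
# must reach num - 1 before squaring to 1, else num is composite. Five rounds
# bound the false-positive rate by 4**-5 for any composite num.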
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_UpperCAmelCase = """hf-internal-testing/tiny-random-bert"""
_UpperCAmelCase = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
_UpperCAmelCase = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = cached_file(lowercase , lowercase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowercase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) )
with open(os.path.join(lowercase , 'refs' , 'main' ) ) as f:
A_ : Optional[Any] = f.read()
self.assertEqual(lowercase , os.path.join(lowercase , 'snapshots' , lowercase , lowercase ) )
self.assertTrue(os.path.isfile(lowercase ) )
# File is cached at the same place the second time.
A_ : Optional[Any] = cached_file(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
# Using a specific revision to test the full commit hash.
A_ : List[str] = cached_file(lowercase , lowercase , revision='9b8c223' )
self.assertEqual(lowercase , os.path.join(lowercase , 'snapshots' , lowercase , lowercase ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowercase , 'is not a valid model identifier' ):
A_ : Tuple = cached_file('tiny-random-bert' , lowercase )
with self.assertRaisesRegex(lowercase , 'is not a valid git identifier' ):
A_ : int = cached_file(lowercase , lowercase , revision='aaaa' )
with self.assertRaisesRegex(lowercase , 'does not appear to have a file named' ):
A_ : List[Any] = cached_file(lowercase , 'conf' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowercase , 'does not appear to have a file named' ):
A_ : Optional[int] = cached_file(lowercase , 'conf' )
with open(os.path.join(lowercase , 'refs' , 'main' ) ) as f:
A_ : Union[str, Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowercase , '.no_exist' , lowercase , 'conf' ) ) )
A_ : str = cached_file(lowercase , 'conf' , _raise_exceptions_for_missing_entries=lowercase )
self.assertIsNone(lowercase )
A_ : int = cached_file(lowercase , 'conf' , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase )
self.assertIsNone(lowercase )
A_ : Dict = mock.Mock()
A_ : Optional[int] = 5_0_0
A_ : List[str] = {}
A_ : Any = HTTPError
A_ : Any = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowercase ) as mock_head:
A_ : str = cached_file(lowercase , 'conf' , _raise_exceptions_for_connection_errors=lowercase )
self.assertIsNone(lowercase )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , lowercase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowercase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowercase ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowercase , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , lowercase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowercase , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , lowercase , revision='ahaha' )
A_ : List[Any] = get_file_from_repo('bert-base-cased' , lowercase )
# The name is the cached name which is not very easy to test, so instead we load the content.
A_ : List[str] = json.loads(open(lowercase , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 7_6_8 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : List[str] = Path(lowercase ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(lowercase , 'a.txt' ) , str(lowercase ) )
self.assertIsNone(get_file_from_repo(lowercase , 'b.txt' ) )
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 1 |
from ... import PretrainedConfig
_UpperCAmelCase = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCamelCase_ = '''nezha'''
def __init__( self , lowercase=2_1_1_2_8 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=6_4 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase=0 , lowercase=2 , lowercase=3 , lowercase=True , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : List[Any] = vocab_size
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Dict = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Tuple = max_relative_position
A_ : str = type_vocab_size
A_ : str = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[int] = classifier_dropout
A_ : str = use_cache
| 70 | import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 4
A_ : int = 3
A_ : List[str] = (3_2, 3_2)
A_ : Any = jax.random.PRNGKey(0 )
A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
A_ : int = self.dummy_input
return init_dict, inputs_dict
| 70 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 70 | import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Any = np.array(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
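# Usage sketch (hedged; assumes the de-obfuscated class name PolybiusCipher):
# cipher = PolybiusCipher()
# cipher.letter_to_numbers('a') -> array([1, 1]), and since 'j' is folded
# into 'i' the 25-letter square lets encode(...) and decode(...) round-trip.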
| 70 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' ,[None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' ,['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Any ,__lowercase : int ):
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config ,'IN_MEMORY_MAX_SIZE' ,__lowercase )
A_ : List[str] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A_ : Dict = dataset_size < in_memory_max_size
else:
A_ : Dict = False
A_ : Union[str, Any] = is_small_dataset(__lowercase )
assert result == expected
| 70 | from math import sqrt
def UpperCamelCase ( __lowercase : int = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowercase ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
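# Hedged note (Project Euler 86): for an a x b x c cuboid with a <= b <= c the
# shortest surface path is sqrt((a + b)**2 + c**2), so for each candidate c the
# loop scans the sums a + b, keeps those with an integer path length, and the
# min/max clamp counts the admissible splits (1 <= a <= b <= c) of that sum.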
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjacency[nodea].append([nodea, cost] )
adjacency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 70 | import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ):
"""simple docstring"""
super().__init__()
A_ : Tuple = initial_learning_rate
A_ : List[str] = warmup_steps
A_ : int = power
A_ : Dict = decay_schedule_fn
A_ : Any = name
def __call__( self , lowercase ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
A_ : Optional[int] = tf.cast(lowercase , tf.floataa )
A_ : int = tf.cast(self.warmup_steps , tf.floataa )
A_ : Optional[int] = global_step_float / warmup_steps_float
A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , )
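# Worked example (a sketch): with power=1.0 the warmup is linear, so at
# global_step = 500 of warmup_steps = 1000 this returns
# initial_learning_rate * (500 / 1000) ** 1.0 = 0.5 * initial_learning_rate;
# beyond warmup_steps, tf.cond falls through to decay_schedule_fn.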
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,):
'''simple docstring'''
A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,)
if num_warmup_steps:
A_ : Tuple = WarmUp(
initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,)
if weight_decay_rate > 0.0:
A_ : Union[str, Any] = AdamWeightDecay(
learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,)
else:
A_ : Dict = tf.keras.optimizers.Adam(
learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
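# Usage sketch (hedged; keyword names assumed from the de-obfuscated
# transformers helper create_optimizer, not guaranteed by this snippet):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000,
#     weight_decay_rate=0.01,
# )
# model.compile(optimizer=optimizer)  # hypothetical Keras model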
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ):
"""simple docstring"""
super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
A_ : Dict = weight_decay_rate
A_ : Union[str, Any] = include_in_weight_decay
A_ : str = exclude_from_weight_decay
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : Tuple = {'WarmUp': WarmUp}
return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase )
A_ : Optional[Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ):
"""simple docstring"""
A_ , A_ : Optional[int] = list(zip(*lowercase ) )
return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A_ : List[str] = apply_state or {}
A_ : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A_ : Dict = self._fallback_apply_state(lowercase , lowercase )
A_ : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return False
return True
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : int = []
A_ : Optional[int] = None
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self._accum_steps is None:
A_ : int = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowercase ):
"""simple docstring"""
if not self._gradients:
A_ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowercase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' )
for accum_gradient, gradient in zip(self._gradients , lowercase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowercase )
self._accum_steps.assign_add(1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowercase ) )
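# Usage sketch (hedged; assumes the de-obfuscated name GradientAccumulator and
# its reset() method): sum gradients over several micro-batches, apply once:
# accumulator = GradientAccumulator()
# for micro_batch in micro_batches:        # hypothetical iterable
#     grads = tape.gradient(loss, model.trainable_variables)
#     accumulator(grads)                   # running per-variable sums
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()                      # zero sums and the step counter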
| 70 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''segformer'''
def __init__( self , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[3_2, 6_4, 1_6_0, 2_5_6] , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[1, 2, 5, 8] , lowercase=[4, 4, 4, 4] , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.02 , lowercase=0.1 , lowercase=1E-6 , lowercase=2_5_6 , lowercase=2_5_5 , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , lowercase , )
A_ : Optional[int] = num_channels
A_ : int = num_encoder_blocks
A_ : Optional[int] = depths
A_ : Optional[int] = sr_ratios
A_ : int = hidden_sizes
A_ : List[str] = patch_sizes
A_ : Any = strides
A_ : Union[str, Any] = mlp_ratios
A_ : str = num_attention_heads
A_ : List[str] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = classifier_dropout_prob
A_ : Dict = initializer_range
A_ : Any = drop_path_rate
A_ : Dict = layer_norm_eps
A_ : Optional[int] = decoder_hidden_size
A_ : Any = kwargs.get('reshape_last_stage' , lowercase )
A_ : Tuple = semantic_loss_ignore_index
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = version.parse('''1.11''' )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1E-4
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_2
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : Any = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
| 70 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''sew'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase=2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=0 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=0 , lowercase=1 , lowercase=2 , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : Union[str, Any] = hidden_size
A_ : str = feat_extract_norm
A_ : int = feat_extract_activation
A_ : List[str] = list(lowercase )
A_ : str = list(lowercase )
A_ : Tuple = list(lowercase )
A_ : int = conv_bias
A_ : int = num_conv_pos_embeddings
A_ : Dict = num_conv_pos_embedding_groups
A_ : Any = len(self.conv_dim )
A_ : Optional[Any] = num_hidden_layers
A_ : List[str] = intermediate_size
A_ : List[str] = squeeze_factor
A_ : Optional[int] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : List[str] = hidden_dropout
A_ : Dict = attention_dropout
A_ : int = activation_dropout
A_ : Any = feat_proj_dropout
A_ : Any = final_dropout
A_ : Dict = layerdrop
A_ : str = layer_norm_eps
A_ : int = initializer_range
A_ : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) '''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Any = apply_spec_augment
A_ : Union[str, Any] = mask_time_prob
A_ : Optional[int] = mask_time_length
A_ : int = mask_time_min_masks
A_ : List[Any] = mask_feature_prob
A_ : int = mask_feature_length
A_ : str = mask_feature_min_masks
# ctc loss
A_ : Any = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# sequence classification
A_ : Optional[Any] = use_weighted_layer_sum
A_ : Any = classifier_proj_size
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
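# Editor's note (illustrative, not from the original file): the property above
# multiplies the convolutional strides, i.e. the ratio of raw input samples to
# encoder frames. With the default strides shown in __init__ this would be:
#     functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)  # -> 320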
| 70 | def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
A_ : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
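# Editor's sketch (assumption: the A_ assignments above were meant to fill a
# `dp` table). A minimal runnable version of the same minimum-partition-
# difference DP:
def find_min_sketch(arr: list) -> int:
    n = len(arr)
    s = sum(arr)
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j  # e.g. find_min_sketch([1, 6, 11, 5]) -> 1
    return s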
| 70 | 1 |
import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Any = np.array(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
A_ : Optional[int] = message.replace(' ' , '' )
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
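# Editor's note: the class above implements a bifid-style cipher over the 5x5
# Polybius square defined at the top of this file ('j' folded into 'i'). A
# self-contained sketch of the coordinate lookup it relies on (names are
# illustrative, not from the original file):
import numpy as np

SQUARE_SKETCH = np.array([
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
])

def letter_to_numbers_sketch(letter: str) -> tuple:
    # 1-based (row, column) coordinates of `letter` in the square
    row, col = np.where(SQUARE_SKETCH == letter)
    return int(row[0]) + 1, int(col[0]) + 1

# letter_to_numbers_sketch("a") -> (1, 1); letter_to_numbers_sketch("z") -> (5, 5)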
| 70 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : List[Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : str = get_activation('gelu' )
A_ : int = get_activation('gelu_10' )
A_ : Optional[int] = torch_builtin(lowercase )
A_ : Tuple = geluaa(lowercase )
A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowercase ):
get_activation('bogus' )
with self.assertRaises(lowercase ):
get_activation(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = get_activation('gelu' )
A_ : List[str] = 1
A_ : Optional[Any] = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowercase ):
A_ : str = acta.a
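# Editor's note (formulas are the standard definitions, not taken from this file):
#   gelu_python(x) = 0.5 * x * (1 + erf(x / sqrt(2)))                             # exact erf form
#   gelu_new(x)    = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))     # tanh approximation
# which is why the first test above expects gelu_python to match the torch
# builtin but to differ slightly from gelu_new.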
| 70 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = GPTSanJapaneseTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = {'''do_clean_text''': False, '''add_prefix_space''': False}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
A_ : int = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
A_ : Tuple = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
A_ : int = {'unk_token': '<unk>'}
A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(lowercase ) )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Tuple = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
A_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Any = self.get_input_output_texts(lowercase )
A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : Optional[Any] = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
return text, ids
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.get_tokenizer()
# Testing tokenization
A_ : List[str] = 'こんにちは、世界。 こんばんは、㔺界。'
A_ : int = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
A_ : Optional[int] = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids without special tokens
A_ : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
A_ : List[str] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids with special tokens
A_ : Tuple = tokens + [tokenizer.unk_token]
A_ : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
A_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
A_ : int = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
A_ : List[Any] = 'こんにちは、、、、世界。こんばんは、、、、世界。'
A_ : List[str] = tokenizer.encode(lowercase )
A_ : Optional[int] = tokenizer.decode(lowercase )
self.assertEqual(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
A_ : Union[str, Any] = 'こんにちは、世界。'
A_ : Any = 'こんばんは、㔺界。😀'
A_ : List[str] = 'こんにちは、世界。こんばんは、世界。😀'
A_ : Any = tokenizer.encode(prefix_text + input_text )
A_ : Union[str, Any] = tokenizer.encode('' , prefix_text=prefix_text + input_text )
A_ : List[Any] = tokenizer.encode(lowercase , prefix_text=lowercase )
A_ : List[Any] = tokenizer.decode(lowercase )
A_ : Union[str, Any] = tokenizer.decode(lowercase )
A_ : List[Any] = tokenizer.decode(lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
A_ : Optional[int] = 'こんにちは、世界。'
A_ : List[str] = 'こんばんは、㔺界。😀'
A_ : Optional[Any] = len(tokenizer.encode(lowercase ) ) - 2
A_ : Dict = len(tokenizer.encode(lowercase ) ) - 2
A_ : int = [1] + [0] * (len_prefix + len_text + 1)
A_ : Any = [1] * (len_prefix + len_text + 1) + [0]
A_ : Any = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
A_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids
A_ : Optional[Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
A_ : Dict = tokenizer(lowercase , prefix_text=lowercase ).token_type_ids
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
A_ : int = tokenizer.encode('あンいワ' )
A_ : Union[str, Any] = tokenizer.encode('' , prefix_text='あンいワ' )
A_ : List[Any] = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(lowercase ) , tokenizer.decode(lowercase ) )
self.assertEqual(tokenizer.decode(lowercase ) , tokenizer.decode(lowercase ) )
self.assertNotEqual(lowercase , lowercase )
self.assertNotEqual(lowercase , lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
A_ : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
A_ : Optional[int] = tokenizer(lowercase , padding=lowercase )
A_ : str = tokenizer.batch_encode_plus(lowercase , padding=lowercase )
# fmt: off
A_ : str = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
A_ : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
A_ : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase )
self.assertListEqual(x_token.token_type_ids , lowercase )
self.assertListEqual(x_token.attention_mask , lowercase )
self.assertListEqual(x_token_a.input_ids , lowercase )
self.assertListEqual(x_token_a.token_type_ids , lowercase )
self.assertListEqual(x_token_a.attention_mask , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
| 70 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.convolution(self.padding(lowercase ) )
A_ : List[str] = self.normalization(lowercase )
A_ : List[Any] = self.activation(lowercase )
return hidden_state
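# Editor's note: the explicit ZeroPadding2D(kernel_size // 2) plus a VALID conv
# above reproduces SAME padding for odd kernel sizes. A quick shape check
# (values assumed for illustration): H=224, kernel=3, stride=2 gives a padded
# height of 226 and an output height of (226 - 3) // 2 + 1 = 112, exactly what
# SAME padding would produce.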
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[int] = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.pooler(lowercase )
for layer_module in self.attention:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
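# Editor's sketch of the squeeze-excitation flow above (shapes assume the NHWC
# layout this file uses after the embedder transpose):
#   pooled = GlobalAveragePooling2D(keepdims=True)(x)   # (B, 1, 1, C)  "squeeze"
#   pooled = Conv1x1 -> relu -> Conv1x1 -> sigmoid      # (B, 1, 1, C)  "excite"
#   out    = x * pooled                                 # channel-wise reweighting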
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : Optional[int] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : int = layer_module(lowercase )
A_ : Union[str, Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Dict = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : int = max(1 , out_channels // config.groups_width )
A_ : Optional[int] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : List[str] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = hidden_state
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Dict = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[Any] = config
A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : str = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase )
A_ : Optional[int] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Dict = encoder_outputs[0]
A_ : List[Any] = self.pooler(lowercase )
# Change to NCHW output format have uniformity in the modules
A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : int = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : int = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : List[Any] = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : List[Any] = self.classifier[0](lowercase )
A_ : Union[str, Any] = self.classifier[1](lowercase )
A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
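# Editor-added usage sketch. All classes in this file are named UpperCAmelCase,
# so the class name below is an illustrative stand-in for the image
# classification model defined above; the checkpoint comes from the docstring
# constants near the top of the file:
#   import tensorflow as tf
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(pixel_values=tf.random.normal((1, 3, 224, 224))).logits  # (1, num_labels)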
| 70 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , lowercase , **lowercase ):
"""simple docstring"""
return super().__call__(lowercase , **lowercase )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
A_ : List[Any] = {}
if "candidate_labels" in kwargs:
A_ : List[str] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
A_ : Dict = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def lowerCAmelCase_ ( self , lowercase , lowercase=None , lowercase="This is a photo of {}." ):
"""simple docstring"""
A_ : int = load_image(lowercase )
A_ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
A_ : Dict = candidate_labels
A_ : int = [hypothesis_template.format(lowercase ) for x in candidate_labels]
A_ : int = self.tokenizer(lowercase , return_tensors=self.framework , padding=lowercase )
A_ : Optional[int] = [text_inputs]
return inputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = model_inputs.pop('candidate_labels' )
A_ : List[Any] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , lowercase ):
A_ : Optional[Any] = text_inputs[0]
else:
# Batching case.
A_ : Optional[Any] = text_inputs[0][0]
A_ : Tuple = self.model(**lowercase , **lowercase )
A_ : Tuple = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = model_outputs.pop('candidate_labels' )
A_ : str = model_outputs['logits'][0]
if self.framework == "pt":
A_ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
A_ : int = probs.tolist()
if not isinstance(lowercase , lowercase ):
A_ : Any = [scores]
elif self.framework == "tf":
A_ : Union[str, Any] = stable_softmax(lowercase , axis=-1 )
A_ : Any = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
A_ : Tuple = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(lowercase , lowercase ) , key=lambda x : -x[0] )
]
return result
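# Editor-added usage sketch for the pipeline above (the CLIP checkpoint is an
# assumption, not referenced in this file):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]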
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
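# Editor's note: the _LazyModule above defers the heavy submodule imports until
# an attribute is first accessed, e.g. (illustrative):
#   from transformers.models import biogpt   # cheap: nothing loaded yet
#   biogpt.BioGptModel                       # first access triggers the real import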
| 70 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase_ ( *lowercase , **lowercase ):
"""simple docstring"""
pass
def UpperCamelCase ( __lowercase : Union[str, Any] ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_UpperCAmelCase = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = pipeline(
'document-question-answering' , model=lowercase , tokenizer=lowercase , image_processor=lowercase )
A_ : Union[str, Any] = INVOICE_URL
A_ : List[Any] = list(zip(*apply_tesseract(load_image(lowercase ) , lowercase , '' ) ) )
A_ : List[str] = 'What is the placebo?'
A_ : Dict = [
{
'image': load_image(lowercase ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : List[Any] = dqa_pipeline(lowercase , top_k=2 )
self.assertEqual(
lowercase , [
[
{'score': ANY(lowercase ), 'answer': ANY(lowercase ), 'start': ANY(lowercase ), 'end': ANY(lowercase )},
{'score': ANY(lowercase ), 'answer': ANY(lowercase ), 'start': ANY(lowercase ), 'end': ANY(lowercase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
A_ : Optional[Any] = INVOICE_URL
A_ : Tuple = 'How many cats are there?'
A_ : Any = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
]
A_ : Optional[int] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(nested_simplify(lowercase , decimals=4 ) , lowercase )
A_ : int = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(lowercase , decimals=4 ) , lowercase )
# No text is detected in this image, so layoutlmv2 should fail.
# The answer will probably be empty.
A_ : Optional[int] = './tests/fixtures/tests_samples/COCO/000000039769.png'
A_ : Union[str, Any] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(lowercase , [] )
# We can optionally pass the words and bounding boxes directly
A_ : int = './tests/fixtures/tests_samples/COCO/000000039769.png'
A_ : Dict = []
A_ : List[Any] = []
A_ : Union[str, Any] = dqa_pipeline(image=lowercase , question=lowercase , words=lowercase , boxes=lowercase , top_k=2 )
self.assertEqual(lowercase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
A_ : List[str] = INVOICE_URL
A_ : Optional[Any] = 'What is the invoice number?'
A_ : List[Any] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : Optional[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : Tuple = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
A_ : Optional[int] = INVOICE_URL
A_ : List[str] = 'What is the invoice number?'
A_ : List[Any] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : int = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : Union[str, Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase )
A_ : str = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , )
A_ : Optional[Any] = INVOICE_URL
A_ : Optional[int] = 'What is the invoice number?'
A_ : Tuple = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
A_ : Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
A_ : Dict = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
A_ : Any = list(zip(*apply_tesseract(load_image(lowercase ) , lowercase , '' ) ) )
# This model should also work if `image` is set to None
A_ : Union[str, Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase )
A_ : List[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , max_seq_len=5_0 , )
A_ : List[str] = INVOICE_URL
A_ : Optional[Any] = 'What is the invoice number?'
A_ : Any = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : str = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
A_ : Any = list(zip(*apply_tesseract(load_image(lowercase ) , lowercase , '' ) ) )
# This model should also work if `image` is set to None
A_ : Union[str, Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
A_ : Dict = INVOICE_URL
A_ : List[Any] = 'What is the invoice number?'
A_ : str = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 )
self.assertEqual(nested_simplify(lowercase , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
| 70 | def UpperCamelCase ( __lowercase : list ):
'''simple docstring'''
A_ : str = len(__lowercase )
for _ in range(__lowercase ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
A_ , A_ : Optional[Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_UpperCAmelCase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 1 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase ( __A ):
'''simple docstring'''
pass
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
A_ : Any = data
A_ : Node | None = None
def __iter__( self ):
"""simple docstring"""
A_ : str = self
A_ : str = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowercase )
yield node.data
A_ : int = node.next_node
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
_UpperCAmelCase = Node(1)
_UpperCAmelCase = Node(2)
_UpperCAmelCase = Node(3)
_UpperCAmelCase = Node(4)
print(root_node.has_loop) # False
_UpperCAmelCase = root_node.next_node
print(root_node.has_loop) # True
_UpperCAmelCase = Node(5)
_UpperCAmelCase = Node(6)
_UpperCAmelCase = Node(5)
_UpperCAmelCase = Node(6)
print(root_node.has_loop) # False
_UpperCAmelCase = Node(1)
print(root_node.has_loop) # False
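# Editor-added alternative (not from the original file): Floyd's tortoise-and-
# hare detects the same loops in O(1) extra memory instead of the visited list:
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:                 # the pointers meet only inside a cycle
            return True
    return False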
| 70 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''wavlm'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : List[Any] = hidden_size
A_ : Tuple = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Optional[Any] = list(lowercase )
A_ : Union[str, Any] = list(lowercase )
A_ : List[str] = list(lowercase )
A_ : str = conv_bias
A_ : Tuple = num_buckets
A_ : Union[str, Any] = max_bucket_distance
A_ : int = num_conv_pos_embeddings
A_ : str = num_conv_pos_embedding_groups
A_ : str = len(self.conv_dim )
A_ : Tuple = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : str = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Optional[int] = feat_proj_dropout
A_ : List[Any] = final_dropout
A_ : Union[str, Any] = layerdrop
A_ : Dict = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : str = num_ctc_classes
A_ : Any = vocab_size
A_ : str = do_stable_layer_norm
A_ : int = use_weighted_layer_sum
A_ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : List[str] = apply_spec_augment
A_ : Optional[Any] = mask_time_prob
A_ : int = mask_time_length
A_ : Any = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ : int = num_codevectors_per_group
A_ : Any = num_codevector_groups
A_ : List[Any] = contrastive_logits_temperature
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : int = proj_codevector_dim
A_ : int = diversity_loss_weight
# ctc loss
A_ : Union[str, Any] = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# adapter
A_ : int = add_adapter
A_ : Optional[Any] = adapter_kernel_size
A_ : Optional[int] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = list(lowercase )
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Dict = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 70 | 1 |
import math
import qiskit
def UpperCamelCase ( __lowercase : int = 1 ,__lowercase : int = 1 ,__lowercase : int = 1 ):
'''simple docstring'''
if (
isinstance(__lowercase ,__lowercase )
or isinstance(__lowercase ,__lowercase )
or isinstance(__lowercase ,__lowercase )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
A_ : Union[str, Any] = qiskit.QuantumRegister(4 ,'qr' )
A_ : int = qiskit.ClassicalRegister(2 ,'cr' )
# list the entries
A_ : List[str] = [input_a, input_a, carry_in]
A_ : List[str] = qiskit.QuantumCircuit(__lowercase ,__lowercase )
for i in range(0 ,3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 ,1 ,3 ) # ccx = toffoli gate
quantum_circuit.cx(0 ,1 )
quantum_circuit.ccx(1 ,2 ,3 )
quantum_circuit.cx(1 ,2 )
quantum_circuit.cx(0 ,1 )
quantum_circuit.measure([2, 3] ,__lowercase ) # measure the last two qbits
A_ : List[str] = qiskit.Aer.get_backend('aer_simulator' )
A_ : Optional[int] = qiskit.execute(__lowercase ,__lowercase ,shots=10_00 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 70 | import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase )
else:
A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase )
if hidden_sizes == 1_92:
A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase )
if hidden_sizes == 2_56:
A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase )
if hidden_sizes == 3_84:
A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase )
from_model.eval()
A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval()
A_ : Union[str, Any] = OrderedDict()
A_ : Dict = from_model.state_dict()
A_ : Tuple = list(from_model.state_dict().keys() )
A_ : str = list(our_model.state_dict().keys() )
print(len(__lowercase ) ,len(__lowercase ) )
for i in range(len(__lowercase ) ):
A_ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowercase )
A_ : str = torch.randn((2, 3, 2_24, 2_24) )
A_ : str = from_model(__lowercase )
A_ : Optional[Any] = our_model(__lowercase ).logits
assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one."
A_ : List[str] = name
print(__lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
A_ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ):
'''simple docstring'''
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : Optional[int] = 10_00
A_ : Optional[int] = (1, num_labels)
A_ : int = 'huggingface/label-files'
A_ : int = num_labels
A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
A_ : Any = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
A_ : Tuple = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
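# Hedged usage note (added for illustration; not part of the original script). Assuming
# the file is saved as convert_levit_timm_to_pytorch.py (a hypothetical name), a single
# checkpoint can be converted locally, without pushing, via:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-192 --no-push_to_hub
#
# Omitting --model_name converts every entry in names_to_config in one run.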
| 70 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase_ = None
class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase_ = PandasConfig
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowercase , (str, list, tuple) ):
A_ : Tuple = data_files
if isinstance(lowercase , lowercase ):
A_ : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ : Dict = [dl_manager.iter_files(lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
A_ : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(lowercase , lowercase ):
A_ : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ : Optional[Any] = [dl_manager.iter_files(lowercase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase , gen_kwargs={'files': files} ) )
return splits
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(lowercase , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(lowercase ) ):
with open(lowercase , 'rb' ) as f:
A_ : List[str] = pa.Table.from_pandas(pd.read_pickle(lowercase ) )
yield i, self._cast_table(lowercase )
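# Hedged usage note (added; not in the original module). This builder backs calls such
# as load_dataset("pandas", data_files="data.pkl"): each data file is expected to be a
# pickled DataFrame, which _generate_tables converts into one Arrow table per file.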
| 70 | def UpperCamelCase ( __lowercase : str ,__lowercase : int ):
'''simple docstring'''
A_ : int = word.split()
def justify(__lowercase : list ,__lowercase : int ,__lowercase : int ) -> str:
A_ : Optional[Any] = max_width - width
A_ : Union[str, Any] = len(__lowercase )
if len(__lowercase ) == 1:
            # if there is only one word on the line,
            # just insert overall_spaces_count spaces for the remainder of the line
return line[0] + " " * overall_spaces_count
else:
A_ : Dict = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
A_ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A_ : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
A_ : Tuple = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
A_ : List[str] = []
A_ : list[str] = []
A_ : Dict = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase ,__lowercase ,__lowercase ) )
# reset new line and new width
A_ , A_ : Any = [word], len(__lowercase )
A_ : int = max_width - width - len(__lowercase )
answer.append(' '.join(__lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
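# Hedged worked example (added; names refer to the algorithm's intent, since the
# identifiers in this corpus are mangled). Justifying the text
# "This is an example of text justification." to max_width=16 produces
#   ['This    is    an', 'example  of text', 'justification.  ']
# with surplus spaces handed out round-robin from the left word gaps, and the last
# line left-justified and padded on the right to the full width.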
| 70 | 1 |
def UpperCamelCase ( __lowercase : list[int] ,__lowercase : list[int] ,__lowercase : int ):
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__lowercase ) )
def UpperCamelCase ( __lowercase : list[list[int]] ,__lowercase : int ,__lowercase : list[int] ,__lowercase : int ):
'''simple docstring'''
if index == len(__lowercase ):
return True
# Recursive Step
for i in range(__lowercase ):
if valid_coloring(graph[index] ,__lowercase ,__lowercase ):
# Color current vertex
A_ : Optional[int] = i
# Validate coloring
if util_color(__lowercase ,__lowercase ,__lowercase ,index + 1 ):
return True
# Backtrack
A_ : List[str] = -1
return False
def UpperCamelCase ( __lowercase : list[list[int]] ,__lowercase : int ):
'''simple docstring'''
A_ : int = [-1] * len(__lowercase )
if util_color(__lowercase ,__lowercase ,__lowercase ,0 ):
return colored_vertices
return []
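# Hedged usage sketch (added; in the original algorithm the three helpers above are
# valid_coloring, util_color and color). On the adjacency matrix
#   graph = [[0, 1, 0, 1],
#            [1, 0, 1, 1],
#            [0, 1, 0, 1],
#            [1, 1, 1, 0]]
# asking for max_colors=3 yields a valid coloring such as [0, 1, 0, 2], while
# max_colors=2 returns [] because vertices 1, 2 and 3 form a triangle.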
| 70 | import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
            '--task' , type=lowercase , default='summarization' , required=lowercase , help='Task to fine-tune on, e.g. summarization or translation.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will affect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
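# Hedged usage sketch (added; not part of the original script). Assuming the file is
# saved as finetune.py and that add_generic_args from lightning_base contributes flags
# such as --model_name_or_path and --output_dir, a representative run would be:
#
#   python finetune.py --model_name_or_path t5-small --data_dir $XSUM_DIR \
#       --output_dir xsum_results --gpus 1 --do_train --do_predict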
| 70 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=3_0 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1_0 , lowercase=0.02 , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = patch_size
A_ : Tuple = num_channels
A_ : Union[str, Any] = is_training
A_ : Dict = use_labels
A_ : Optional[int] = hidden_size
A_ : List[str] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Dict = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Dict = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : str = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Dict = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Optional[int] = (image_size // patch_size) ** 2
A_ : List[Any] = num_patches + 1
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Any = None
if self.use_labels:
A_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = TFViTModel(config=lowercase )
A_ : List[Any] = model(lowercase , training=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image whose size differs from the one specified in the config.
A_ : Tuple = self.image_size // 2
A_ : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
A_ : List[Any] = model(lowercase , interpolate_pos_encoding=lowercase , training=lowercase )
A_ : Dict = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.type_sequence_label_size
A_ : Optional[Any] = TFViTForImageClassification(lowercase )
A_ : Optional[Any] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image whose size differs from the one specified in the config.
A_ : Union[str, Any] = self.image_size // 2
A_ : Any = pixel_values[:, :, :image_size, :image_size]
A_ : str = model(lowercase , interpolate_pos_encoding=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Optional[int] = 1
A_ : Any = TFViTForImageClassification(lowercase )
A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[int] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ : int = config_and_inputs
A_ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = TFViTModelTester(self )
A_ : List[str] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=3_7 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Layer ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(lowercase )
A_ : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
A_ : Optional[Any] = self.default_image_processor
A_ : Union[str, Any] = prepare_img()
A_ : Union[str, Any] = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase )
# verify the logits
A_ : Dict = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Union[str, Any] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
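# Hedged note (added): the _LazyModule indirection defers the optional sentencepiece
# and tokenizers imports until an attribute such as LayoutXLMTokenizer is first
# accessed, so importing the package stays cheap even when those extras are absent.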
| 70 | def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = 0
while b > 0:
if b & 1:
A_ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
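# Hedged worked example (added): both helpers implement Russian-peasant
# (double-and-add) multiplication; the second reduces the running sum modulo c at
# every step. For a=5, b=6, c=7 it returns 5 * 6 % 7 == 2 without the caller ever
# seeing the full product 30.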
| 70 | 1 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def UpperCamelCase ( __lowercase : Optional[int] ):
'''simple docstring'''
A_ : Dict = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Dict ):
'''simple docstring'''
A_ : str = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def UpperCamelCase ( __lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def UpperCamelCase ( ):
'''simple docstring'''
A_ : int = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : int ,__lowercase : List[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Tuple = 'imagenet-1k-id2label.json'
A_ : str = 10_00
A_ : Optional[int] = 'huggingface/label-files'
A_ : List[str] = num_labels
A_ : List[Any] = json.load(open(cached_download(hf_hub_url(__lowercase ,__lowercase ,repo_type='dataset' ) ) ,'r' ) )
A_ : Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : str = idalabel
A_ : List[Any] = {v: k for k, v in idalabel.items()}
A_ : Tuple = CvtConfig(num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13":
A_ : str = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21":
A_ : Dict = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
A_ : Tuple = [2, 2, 20]
A_ : Optional[Any] = [3, 12, 16]
A_ : Dict = [1_92, 7_68, 10_24]
A_ : Dict = CvtForImageClassification(__lowercase )
A_ : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
A_ : Dict = image_size
A_ : Tuple = torch.load(__lowercase ,map_location=torch.device('cpu' ) )
A_ : List[Any] = OrderedDict()
A_ : Optional[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
A_ : str = list_of_state_dict + cls_token(__lowercase )
A_ : Optional[int] = list_of_state_dict + embeddings(__lowercase )
for cnt in range(config.depth[idx] ):
A_ : Dict = list_of_state_dict + attention(__lowercase ,__lowercase )
A_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowercase )
for i in range(len(__lowercase ) ):
A_ : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 70 | def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if length <= 0 or not isinstance(__lowercase ,__lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
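# Hedged note (added): since n starts at 0 in the comprehension and the n-th
# hexagonal number is n * (2n - 1), the first call above prints [0, 1, 6, 15, 28].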
| 70 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''gpt_neox'''
def __init__( self , lowercase=5_0_4_3_2 , lowercase=6_1_4_4 , lowercase=4_4 , lowercase=6_4 , lowercase=2_4_5_7_6 , lowercase="gelu" , lowercase=0.25 , lowercase=1_0_0_0_0 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=2_0_4_8 , lowercase=0.02 , lowercase=1E-5 , lowercase=True , lowercase=0 , lowercase=2 , lowercase=False , lowercase=True , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : Optional[int] = vocab_size
A_ : Tuple = max_position_embeddings
A_ : Optional[Any] = hidden_size
A_ : Any = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Tuple = hidden_act
A_ : Optional[Any] = rotary_pct
A_ : int = rotary_emb_base
A_ : Optional[Any] = attention_dropout
A_ : List[Any] = hidden_dropout
A_ : int = classifier_dropout
A_ : Dict = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = use_cache
A_ : int = tie_word_embeddings
A_ : Any = use_parallel_residual
A_ : Optional[int] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'''got {self.rope_scaling}''' )
A_ : Union[str, Any] = self.rope_scaling.get('type' , lowercase )
A_ : List[Any] = self.rope_scaling.get('factor' , lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowercase , lowercase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
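# Hedged example (added; GPTNeoXConfig is the original name behind the mangled class
# above). A value that passes the validation and enables linear RoPE scaling is
#   rope_scaling={"type": "linear", "factor": 2.0}
# whereas {"type": "linear"} alone raises, because the factor field is required.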
| 70 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 70 | 1 |
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
A_ : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
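# Hedged worked example (added): for arr = [1, 6, 11, 5] the total is s = 23; the DP
# table marks the subset sum j = 11 (for example {6, 5}) as reachable with
# j <= s // 2, so the minimum partition difference returned is s - 2 * j = 1.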
| 70 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
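# Editor's usage note (hedged): installed as the `accelerate` console script,
# this entry point dispatches subcommands such as
#     accelerate config
#     accelerate env
#     accelerate launch --num_processes 2 train.py
# (train.py is a placeholder script name) and prints the help text above when
# no known subcommand is given.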
| 70 | 1 |
from __future__ import annotations
from typing import TypedDict
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
lowerCamelCase_ = 42
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
if not isinstance(__lowercase ,__lowercase ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
if not isinstance(__lowercase ,__lowercase ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
A_ : Optional[Any] = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
A_ : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def UpperCamelCase ( __lowercase : str ,__lowercase : int ):
'''simple docstring'''
if not isinstance(__lowercase ,__lowercase ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
A_ : str = int(__lowercase )
except ValueError:
raise TypeError(
            'The parameter idx_original_string type must be int or'
            ' castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(__lowercase ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
A_ : Optional[Any] = [''] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
A_ : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
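# Editor's worked example (hedged): for s = "banana" the sorted rotations are
# ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba'], so the forward
# transform yields bwt_string "nnbaaa" with idx_original_string 3, and the
# inverse reconstructs "banana" by len(s) rounds of prepend-and-sort.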
if __name__ == "__main__":
_UpperCAmelCase = """Provide a string that I will generate its BWT transform: """
_UpperCAmelCase = input(entry_msg).strip()
_UpperCAmelCase = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
_UpperCAmelCase = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
| 70 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def UpperCamelCase ( __lowercase : List[str] ):
'''simple docstring'''
A_ : List[Any] = []
for line in lines:
A_ : Optional[Any] = re.sub(r'#.*' ,'' ,__lowercase ) # remove comments
if line:
filtered_lines.append(__lowercase )
A_ : List[Any] = '\n'.join(__lowercase )
# Make a hash from all this code
A_ : int = full_str.encode('utf-8' )
return shaaaa(__lowercase ).hexdigest()
# get importable module names and hash for caching
_UpperCAmelCase = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_UpperCAmelCase = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_UpperCAmelCase = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_UpperCAmelCase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 70 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_UpperCAmelCase = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
A_ : List[str] = list(s_dict.keys() )
for key in keys:
A_ : List[Any] = r'.*/layers_(\d+)'
A_ : int = key
if re.match(__lowercase ,__lowercase ):
A_ : List[str] = re.sub(r'layers_(\d+)' ,r'block/\1/layer' ,__lowercase )
A_ : List[str] = r'(encoder|decoder)\/'
if re.match(__lowercase ,__lowercase ):
A_ : List[str] = re.match(__lowercase ,__lowercase ).groups()
if groups[0] == "encoder":
A_ : int = re.sub(r'/mlp/' ,r'/1/mlp/' ,__lowercase )
A_ : Optional[int] = re.sub(r'/pre_mlp_layer_norm/' ,r'/1/layer_norm/' ,__lowercase )
elif groups[0] == "decoder":
A_ : Tuple = re.sub(r'/mlp/' ,r'/2/mlp/' ,__lowercase )
A_ : List[str] = re.sub(r'/pre_mlp_layer_norm/' ,r'/2/layer_norm/' ,__lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ : Optional[Any] = new_key.replace(__lowercase ,__lowercase )
print(f'''{key} -> {new_key}''' )
A_ : Optional[Any] = s_dict.pop(__lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ : Tuple = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ : int = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ : Any = s_dict[key].shape[0]
A_ : Optional[int] = s_dict[key]
for idx in range(__lowercase ):
A_ : Optional[Any] = expert_weihts[idx]
                print(f'''{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}''' )
s_dict.pop(__lowercase )
return s_dict
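# Editor's worked example (hedged): under the rewrites above, the flax key
# 'encoder/layers_3/mlp/wi/kernel' first becomes
# 'encoder/block/3/layer/mlp/wi/kernel' and then, as an encoder key,
# 'encoder/block/3/layer/1/mlp/wi/kernel'.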
_UpperCAmelCase = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def UpperCamelCase ( __lowercase : int ,__lowercase : Union[str, Any] ):
'''simple docstring'''
import regex as re
with open(__lowercase ,'r' ) as f:
A_ : str = f.read()
A_ : Optional[int] = re.findall(r'(.*) = ([0-9.]*)' ,__lowercase )
A_ : Union[str, Any] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ : int = float(__lowercase ) if '.' in value else int(__lowercase )
A_ : Optional[Any] = re.findall(r'(.*activations) = \(\'(.*)\',\)' ,__lowercase )[0]
A_ : Union[str, Any] = str(activation[1] )
A_ : Union[str, Any] = num_experts
A_ : Tuple = SwitchTransformersConfig(**__lowercase )
return config
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : int ,__lowercase : Tuple=None ,__lowercase : Tuple="./" ,__lowercase : Tuple=8 ):
'''simple docstring'''
print(f'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ : List[str] = checkpoints.load_tax_checkpoint(__lowercase )
if gin_file is not None:
A_ : int = convert_gin_to_config(__lowercase ,__lowercase )
else:
A_ : List[str] = SwitchTransformersConfig.from_pretrained(__lowercase )
A_ : List[str] = SwitchTransformersForConditionalGeneration(__lowercase )
A_ : Tuple = flax_params['target']
A_ : List[str] = flatten_dict(__lowercase ,sep='/' )
A_ : List[Any] = rename_keys(__lowercase )
A_ : Optional[int] = unflatten_dict(__lowercase ,sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__lowercase ,__lowercase )
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
_UpperCAmelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
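# Editor's note (hedged): `_LazyModule` defers the torch-dependent imports
# until an attribute such as `M2M100Model` is first accessed, so importing the
# package stays cheap (and succeeds) even when torch is unavailable.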
| 70 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = ShapEPipeline
lowerCamelCase_ = ['''prompt''']
lowerCamelCase_ = ['''prompt''']
lowerCamelCase_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase_ = False
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 3_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 3_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 8
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
A_ : Dict = PriorTransformer(**lowercase )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : int = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
A_ : Optional[Any] = ShapERenderer(**lowercase )
return model
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.dummy_prior
A_ : Optional[Any] = self.dummy_text_encoder
A_ : Optional[int] = self.dummy_tokenizer
A_ : List[str] = self.dummy_renderer
A_ : str = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=lowercase , clip_sample=lowercase , clip_sample_range=1.0 , )
A_ : Union[str, Any] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : Any = torch.manual_seed(lowercase )
else:
A_ : List[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Optional[Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = 'cpu'
A_ : int = self.get_dummy_components()
A_ : str = self.pipeline_class(**lowercase )
A_ : List[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[int] = pipe(**self.get_dummy_inputs(lowercase ) )
A_ : str = output.images[0]
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
A_ : str = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = torch_device == 'cpu'
A_ : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase , relax_max_difference=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.get_dummy_components()
A_ : int = self.pipeline_class(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : List[Any] = 1
A_ : List[Any] = 2
A_ : str = self.get_dummy_inputs(lowercase )
for key in inputs.keys():
if key in self.batch_params:
A_ : List[str] = batch_size * [inputs[key]]
A_ : List[str] = pipe(**lowercase , num_images_per_prompt=lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
A_ : Tuple = ShapEPipeline.from_pretrained('openai/shap-e' )
A_ : Any = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : List[str] = torch.Generator(device=lowercase ).manual_seed(0 )
A_ : Dict = pipe(
'a shark' , generator=lowercase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(lowercase , lowercase )
| 70 | import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 4
A_ : int = 3
A_ : List[str] = (3_2, 3_2)
A_ : Any = jax.random.PRNGKey(0 )
A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
A_ : int = self.dummy_input
return init_dict, inputs_dict
| 70 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : List[str]=False ):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A_ : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Optional[int] ,__lowercase : Union[str, Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
A_ : List[str] = ''
else:
A_ : Optional[int] = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : int = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ : Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ : int = in_proj_weight[
: config.hidden_size, :
]
A_ : Optional[int] = in_proj_bias[: config.hidden_size]
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
A_ : str = in_proj_bias[-config.hidden_size :]
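# Editor's note (hedged): timm fuses query, key and value into one
# (3 * hidden_size, hidden_size) `qkv` matrix; the slices above peel off the
# first, middle and last hidden_size rows (and bias entries) for q, k and v.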
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : Tuple ,__lowercase : Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = dct.pop(__lowercase )
A_ : Optional[Any] = val
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A_ : List[str] = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = DeiTConfig()
# all deit models have fine-tuned heads
A_ : List[str] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A_ : int = 10_00
A_ : str = 'huggingface/label-files'
A_ : Any = 'imagenet-1k-id2label.json'
A_ : Any = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[Any] = {v: k for k, v in idalabel.items()}
A_ : List[Any] = int(deit_name[-6:-4] )
A_ : int = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
A_ : Union[str, Any] = 1_92
A_ : str = 7_68
A_ : List[Any] = 12
A_ : Optional[int] = 3
elif deit_name[9:].startswith('small' ):
A_ : Any = 3_84
A_ : str = 15_36
A_ : str = 12
A_ : int = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
A_ : List[str] = 10_24
A_ : Union[str, Any] = 40_96
A_ : int = 24
A_ : Dict = 16
# load original model from timm
A_ : Dict = timm.create_model(__lowercase ,pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Optional[int] = timm_model.state_dict()
A_ : str = create_rename_keys(__lowercase ,__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase ,__lowercase ,__lowercase )
read_in_q_k_v(__lowercase ,__lowercase ,__lowercase )
# load HuggingFace model
A_ : str = DeiTForImageClassificationWithTeacher(__lowercase ).eval()
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
A_ : Tuple = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A_ : int = DeiTImageProcessor(size=__lowercase ,crop_size=config.image_size )
A_ : Optional[Any] = image_processor(images=prepare_img() ,return_tensors='pt' )
A_ : Optional[int] = encoding['pixel_values']
A_ : List[str] = model(__lowercase )
A_ : List[Any] = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase ,outputs.logits ,atol=1e-3 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 70 | import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Any = np.array(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
        A_ : Tuple = message.replace(' ' , '' )
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
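# Editor's worked example (hedged): in the 5 x 5 square above ('j' folded into
# 'i'), 'a' sits at row 1, column 1 and 'k' at row 2, column 5, so encoding
# interleaves those coordinates and decoding reads the digit pairs back out of
# the same square.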
| 70 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
| 70 | from math import sqrt
def UpperCamelCase ( __lowercase : int = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowercase ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Optional[Any]=False ):
'''simple docstring'''
A_ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : str = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : List[Any] ,__lowercase : List[Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
A_ : Any = ''
else:
A_ : Tuple = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ : Any = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[
: config.hidden_size, :
]
A_ : Optional[Any] = in_proj_bias[: config.hidden_size]
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Any = in_proj_weight[
-config.hidden_size :, :
]
A_ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( __lowercase : Dict ):
'''simple docstring'''
A_ : List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__lowercase ,__lowercase )
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : Tuple ,__lowercase : Optional[int] ):
'''simple docstring'''
A_ : Any = dct.pop(__lowercase )
A_ : str = val
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A_ : int = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : str ):
'''simple docstring'''
A_ : Optional[Any] = ViTConfig()
A_ : str = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ : Optional[int] = True
A_ : List[Any] = int(vit_name[-12:-10] )
A_ : List[str] = int(vit_name[-9:-6] )
else:
A_ : Optional[Any] = 10_00
A_ : str = 'huggingface/label-files'
A_ : Optional[int] = 'imagenet-1k-id2label.json'
A_ : Any = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[Any] = idalabel
A_ : Any = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = int(vit_name[-6:-4] )
A_ : Tuple = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
A_ : str = 1_92
A_ : List[Any] = 7_68
A_ : int = 12
A_ : Dict = 3
elif vit_name[9:].startswith('small' ):
A_ : str = 3_84
A_ : List[str] = 15_36
A_ : Dict = 12
A_ : Any = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
A_ : Union[str, Any] = 7_68
A_ : Optional[Any] = 23_04
A_ : int = 8
A_ : int = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
A_ : Dict = 10_24
A_ : List[Any] = 40_96
A_ : Tuple = 24
A_ : Union[str, Any] = 16
elif vit_name[4:].startswith('huge' ):
A_ : Tuple = 12_80
A_ : Optional[Any] = 51_20
A_ : List[Any] = 32
A_ : Optional[Any] = 16
# load original model from timm
A_ : Dict = timm.create_model(__lowercase ,pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : List[Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowercase )
A_ : Optional[int] = create_rename_keys(__lowercase ,__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase ,__lowercase ,__lowercase )
read_in_q_k_v(__lowercase ,__lowercase ,__lowercase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : Union[str, Any] = ViTModel(__lowercase ).eval()
else:
A_ : Any = ViTForImageClassification(__lowercase ).eval()
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ : Optional[Any] = DeiTImageProcessor(size=config.image_size )
else:
A_ : Tuple = ViTImageProcessor(size=config.image_size )
A_ : List[str] = image_processor(images=prepare_img() ,return_tensors='pt' )
A_ : List[Any] = encoding['pixel_values']
A_ : Union[str, Any] = model(__lowercase )
if base_model:
A_ : int = timm_model.forward_features(__lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowercase ,outputs.pooler_output ,atol=1e-3 )
else:
A_ : Union[str, Any] = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase ,outputs.logits ,atol=1e-3 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 70 | import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ):
"""simple docstring"""
super().__init__()
A_ : Tuple = initial_learning_rate
A_ : List[str] = warmup_steps
A_ : int = power
A_ : Dict = decay_schedule_fn
A_ : Any = name
def __call__( self , lowercase ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
A_ : Optional[int] = tf.cast(lowercase , tf.floataa )
A_ : int = tf.cast(self.warmup_steps , tf.floataa )
A_ : Optional[int] = global_step_float / warmup_steps_float
A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,):
'''simple docstring'''
A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,)
if num_warmup_steps:
A_ : Tuple = WarmUp(
initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,)
if weight_decay_rate > 0.0:
A_ : Union[str, Any] = AdamWeightDecay(
learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,)
else:
A_ : Dict = tf.keras.optimizers.Adam(
learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ):
"""simple docstring"""
super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
A_ : Dict = weight_decay_rate
A_ : Union[str, Any] = include_in_weight_decay
A_ : str = exclude_from_weight_decay
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : Tuple = {'WarmUp': WarmUp}
return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase )
A_ : Optional[Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ):
"""simple docstring"""
A_ , A_ : Optional[int] = list(zip(*lowercase ) )
return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A_ : List[str] = apply_state or {}
A_ : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A_ : Dict = self._fallback_apply_state(lowercase , lowercase )
A_ : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return False
return True
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : int = []
A_ : Optional[int] = None
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self._accum_steps is None:
A_ : int = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowercase ):
"""simple docstring"""
if not self._gradients:
A_ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowercase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' )
for accum_gradient, gradient in zip(self._gradients , lowercase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowercase )
self._accum_steps.assign_add(1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowercase ) )
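# Editor's addition (hedged): a self-contained check of the polynomial warmup
# formula used by the schedule above, lr = init_lr * (step / warmup_steps) ** power;
# the helper name `_warmup_lr` is hypothetical.
def _warmup_lr(init_lr, step, warmup_steps, power=1.0):
    # Mirrors the warmup branch for global_step < warmup_steps.
    return init_lr * (step / warmup_steps) ** power

assert abs(_warmup_lr(1e-3, 50, 100) - 5e-4) < 1e-12  # halfway through warmup -> half the peak LR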
| 70 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_UpperCAmelCase = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
_UpperCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = cn.convert_to_negative(__lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCamelCase ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__lowercase ,1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = canny.gen_gaussian_kernel(9 ,sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = imread('digital_image_processing/image_data/lena_small.jpg' ,0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : Any = canny.canny(__lowercase )
# assert canny array for at least one True
assert canny_array.any()
def UpperCamelCase ( ):
'''simple docstring'''
assert gg.gaussian_filter(__lowercase ,5 ,sigma=0.9 ).all()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Union[str, Any] = conv.img_convolve(__lowercase ,__lowercase ).astype(__lowercase )
assert res.any()
def UpperCamelCase ( ):
'''simple docstring'''
assert med.median_filter(__lowercase ,3 ).any()
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : List[str] = sob.sobel_filter(__lowercase )
assert grad.any() and theta.any()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(__lowercase ,20 )
assert sepia.all()
def UpperCamelCase ( __lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : int = bs.Burkes(imread(__lowercase ,1 ) ,1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCamelCase ( __lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ,):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(__lowercase ,1 ) ,4_00 ,2_00 )
nn.process()
assert nn.output.any()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(__lowercase ,0 )
# Test for get_neighbors_pixel function() return not None
A_ : int = 0
A_ : int = 0
A_ : Tuple = image[x_coordinate][y_coordinate]
A_ : int = lbp.get_neighbors_pixel(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : Optional[int] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 ,image.shape[0] ):
for j in range(0 ,image.shape[1] ):
A_ : List[Any] = lbp.local_binary_value(__lowercase ,__lowercase ,__lowercase )
assert lbp_image.any()
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : Any = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
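

# The tests above all repeat one cross-framework round trip. A minimal sketch
# of that pattern (assumes `transformers` with both PyTorch and TensorFlow
# installed, plus network access to fetch the checkpoint):
#
#     from transformers import AutoModel, TFAutoModel
#
#     tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT weights -> TF
#     pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF weights -> PT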
| 70 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = ProphetNetTokenizer
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = 'UNwant\u00E9d,running'
A_ : str = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.tokenizer_class(self.vocab_file )
A_ : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A_ : Dict = {}
for i, token in enumerate(lowercase ):
A_ : List[Any] = i
A_ : Optional[int] = WordpieceTokenizer(vocab=lowercase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
A_ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
A_ : Union[str, Any] = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
A_ : Union[str, Any] = tokenizer(lowercase , padding=lowercase , return_tensors='pt' )
self.assertIsInstance(lowercase , lowercase )
A_ : str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
A_ : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
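

# A compact sketch of the greedy longest-match-first WordPiece loop that the
# WordpieceTokenizer tests above exercise (toy vocab; this is an illustration,
# not the transformers implementation):
def wordpiece_sketch(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur_substr = None
        while start < end:
            substr = word[start:end] if start == 0 else "##" + word[start:end]
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk]  # the whole word becomes [UNK] if any piece is unmatched
        tokens.append(cur_substr)
        start = end
    return tokens


assert wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]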
| 70 | def find_min(arr: list) -> int:
    '''simple docstring'''
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True  # a sum of 0 is always reachable (empty subset)
    for i in range(1, s + 1):
        dp[0][i] = False  # no positive sum is reachable with zero items
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
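
# Example: [1, 6, 11, 5] splits best into {1, 5, 6} vs {11}, so the minimum
# achievable difference is |12 - 11| = 1.
assert find_min([1, 6, 11, 5]) == 1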
| 70 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_UpperCAmelCase = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    '''simple docstring'''
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops) )
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''' )
        print(*incompatible_ops, sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
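
# Example invocation of the compliance checker above (the script name and
# paths are placeholders):
#   python check_tf_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict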
| 70 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        torch_builtin = get_activation('gelu' )
        gelu_aa = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = gelu_aa(x )
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        acta = get_activation('gelu' )
        acta.a = 1
        actb = get_activation('gelu' )
        self.assertEqual(acta.a , 1 )
        # an attribute set on one returned activation must not leak to a fresh one
        with self.assertRaises(AttributeError ):
            _ = actb.a
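

# For reference, standalone sketches of the two GELU variants compared above:
# the exact erf form and the tanh approximation (the "gelu_new"-style curve).
# Assumes torch is installed.
import math


def gelu_exact(x: "torch.Tensor") -> "torch.Tensor":
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def gelu_tanh_approx(x: "torch.Tensor") -> "torch.Tensor":
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))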
| 70 | 1 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCAmelCase ( __A ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = tempfile.mkdtemp()
A_ : Union[str, Any] = 8
# DPR tok
A_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A_ : List[Any] = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(lowercase , exist_ok=lowercase )
A_ : List[str] = os.path.join(lowercase , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
A_ : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A_ : List[str] = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A_ : List[str] = {'unk_token': '<unk>'}
A_ : List[str] = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(lowercase , exist_ok=lowercase )
A_ : str = os.path.join(lowercase , BART_VOCAB_FILES_NAMES['vocab_file'] )
A_ : List[str] = os.path.join(lowercase , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = os.path.join(self.tmpdirname , 'rag_tokenizer' )
A_ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
A_ : Tuple = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase )
rag_tokenizer.save_pretrained(lowercase )
A_ : int = RagTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowercase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
A_ : Optional[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A_ : str = tokenizer(lowercase )
self.assertIsNotNone(lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
A_ : int = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A_ : str = tokenizer(lowercase )
self.assertIsNotNone(lowercase )
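

# The RagTokenizer exercised above is essentially two tokenizers saved side by
# side in one directory. A generic sketch of that composition idea (this is an
# illustration, not the transformers class itself):
class CompositeTokenizerSketch:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator

    def __call__(self, text, **kwargs):
        # by default, plain text is routed through the question-encoder side
        return self.question_encoder(text, **kwargs)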
| 70 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.convolution(self.padding(lowercase ) )
A_ : List[str] = self.normalization(lowercase )
A_ : List[Any] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[int] = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.pooler(lowercase )
for layer_module in self.attention:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : Optional[int] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : int = layer_module(lowercase )
A_ : Union[str, Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Dict = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : int = max(1 , out_channels // config.groups_width )
A_ : Optional[int] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : List[str] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = hidden_state
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Dict = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[Any] = config
A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : str = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase )
A_ : Optional[int] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Dict = encoder_outputs[0]
A_ : List[Any] = self.pooler(lowercase )
# Change to NCHW output format have uniformity in the modules
A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : int = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : List[Any] = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : List[Any] = self.classifier[0](lowercase )
A_ : Union[str, Any] = self.classifier[1](lowercase )
A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
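

# A standalone sketch of the squeeze-and-excitation gating that the SE layer
# above implements (functional Keras style; the channel count of `x` is read
# from its static NHWC shape):
def se_gate_sketch(x, reduced_channels: int):
    pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(x)  # squeeze: NHWC -> N11C
    attention = tf.keras.layers.Conv2D(reduced_channels, kernel_size=1, activation="relu")(pooled)
    attention = tf.keras.layers.Conv2D(x.shape[-1], kernel_size=1, activation="sigmoid")(attention)
    return x * attention  # excite: per-channel reweighting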
| 70 | 1 |
import functools
def min_distance_top_down(worda: str, wordb: str) -> int:
    '''simple docstring'''
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word is exhausted - insert all remaining of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word is exhausted - delete all remaining of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
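    # Worked example: "kitten" -> "sitting" needs 3 edits
    # (substitute k->s, substitute e->i, append g).
    assert min_distance_top_down("kitten", "sitting") == 3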
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
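

# A minimal standalone sketch of the lazy-import pattern used above: attribute
# access triggers the real import the first time it is needed (this is the
# core idea only, not the transformers `_LazyModule`).
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target  # dotted path of the module to load on demand

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._target)
        return getattr(module, attr)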
| 70 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    '''simple docstring'''

    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    origin: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int, ...]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
    def get_image_coords( self ):
        """simple docstring"""
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='trunc' ),
            ] , axis=1 , )
        return coords
return coords
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : Optional[Any] = self.shape
A_ : Any = int(np.prod(lowercase ) )
A_ : Optional[int] = self.get_image_coords()
A_ : Union[str, Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
A_ : int = self.get_camera_rays(lowercase )
A_ : Union[str, Any] = rays.view(lowercase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , *A_ , A_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
A_ : str = coords.view(lowercase , -1 , 2 )
A_ : Union[str, Any] = self.resolution()
A_ : List[Any] = self.fov()
A_ : List[str] = (flat.float() / (res - 1)) * 2 - 1
A_ : Optional[Any] = fracs * torch.tan(fov / 2 )
A_ : Tuple = fracs.view(lowercase , -1 , 2 )
A_ : Optional[Any] = (
self.z.view(lowercase , 1 , 3 )
+ self.x.view(lowercase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowercase , 1 , 3 ) * fracs[:, :, 1:]
)
A_ : List[str] = directions / directions.norm(dim=-1 , keepdim=lowercase )
A_ : List[str] = torch.stack(
[
torch.broadcast_to(self.origin.view(lowercase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowercase , *lowercase , 2 , 3 )
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowercase , height=lowercase , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : List[str] = []
A_ : str = []
A_ : Tuple = []
A_ : str = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
A_ : Dict = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
A_ : Any = -z * 4
A_ : Union[str, Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
A_ : Optional[int] = np.cross(__lowercase ,__lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(__lowercase ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(__lowercase ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(__lowercase ,axis=0 ) ).float() ,width=__lowercase ,height=__lowercase ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(__lowercase )) ,)
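

# The orbit loop above builds, for each angle, an orthonormal camera frame:
# z points from the camera toward the origin, x and y span the image plane.
# A standalone sketch of one step (NumPy only):
def camera_basis_sketch(theta: float):
    z = np.array([np.sin(theta), np.cos(theta), -0.5])
    z /= np.sqrt(np.sum(z**2))
    origin = -z * 4  # camera sits 4 units back along the look direction
    x = np.array([np.cos(theta), -np.sin(theta), 0.0])
    y = np.cross(z, x)
    return origin, x, y, z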
| 70 | def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCAmelCase = 250004
_UpperCAmelCase = 250020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer
lowerCamelCase_ = MBartTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Any = MBartTokenizer(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = MBartTokenizer(lowercase , keep_accents=lowercase )
A_ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
A_ : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A_ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A_ : Any = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A_ : Tuple = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Dict = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A_ : Dict = tempfile.mkdtemp()
A_ : Dict = tokenizer_r.save_pretrained(lowercase )
A_ : List[str] = tokenizer_p.save_pretrained(lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A_ : Tuple = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase , lowercase )
# Checks everything loads correctly in the same way
A_ : Any = tokenizer_r.from_pretrained(lowercase )
A_ : List[Any] = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase )
# Save tokenizer rust, legacy_format=True
A_ : Optional[int] = tempfile.mkdtemp()
A_ : str = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase )
A_ : List[str] = tokenizer_p.save_pretrained(lowercase )
# Checks it save with the same files
self.assertSequenceEqual(lowercase , lowercase )
# Checks everything loads correctly in the same way
A_ : List[str] = tokenizer_r.from_pretrained(lowercase )
A_ : List[str] = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
shutil.rmtree(lowercase )
# Save tokenizer rust, legacy_format=False
A_ : Optional[int] = tempfile.mkdtemp()
A_ : List[str] = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase )
A_ : Optional[int] = tokenizer_p.save_pretrained(lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A_ : int = tokenizer_r.from_pretrained(lowercase )
A_ : Union[str, Any] = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
shutil.rmtree(lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = '''facebook/mbart-large-en-ro'''
lowerCamelCase_ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase_ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def lowerCAmelCase_ ( cls ):
"""simple docstring"""
A_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
A_ : int = 1
return cls
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 2_5_0_0_2_0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertIn(lowercase , self.tokenizer.all_special_ids )
A_ : Optional[int] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
A_ : int = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase )
A_ : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase )
self.assertEqual(lowercase , lowercase )
self.assertNotIn(self.tokenizer.eos_token , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , lowercase )
A_ : List[Any] = 1_0
A_ : int = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase )
self.assertEqual(len(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = tempfile.mkdtemp()
A_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase )
A_ : str = MBartTokenizer.from_pretrained(lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors='pt' )
A_ : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
A_ : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
A_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors='pt' )
A_ : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=1_0 , return_tensors='pt' )
A_ : List[str] = targets['input_ids']
A_ : Dict = shift_tokens_right(lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowercase ) , {
# A, test, EOS, en_XX
'input_ids': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_0_0_0_1,
} , )
| 70 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''wavlm'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : List[Any] = hidden_size
A_ : Tuple = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Optional[Any] = list(lowercase )
A_ : Union[str, Any] = list(lowercase )
A_ : List[str] = list(lowercase )
A_ : str = conv_bias
A_ : Tuple = num_buckets
A_ : Union[str, Any] = max_bucket_distance
A_ : int = num_conv_pos_embeddings
A_ : str = num_conv_pos_embedding_groups
A_ : str = len(self.conv_dim )
A_ : Tuple = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : str = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Optional[int] = feat_proj_dropout
A_ : List[Any] = final_dropout
A_ : Union[str, Any] = layerdrop
A_ : Dict = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : str = num_ctc_classes
A_ : Any = vocab_size
A_ : str = do_stable_layer_norm
A_ : int = use_weighted_layer_sum
A_ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : List[str] = apply_spec_augment
A_ : Optional[Any] = mask_time_prob
A_ : int = mask_time_length
A_ : Any = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ : int = num_codevectors_per_group
A_ : Any = num_codevector_groups
A_ : List[Any] = contrastive_logits_temperature
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : int = proj_codevector_dim
A_ : int = diversity_loss_weight
# ctc loss
A_ : Union[str, Any] = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# adapter
A_ : int = add_adapter
A_ : Optional[Any] = adapter_kernel_size
A_ : Optional[int] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = list(lowercase )
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Dict = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
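The closing property reports the feature extractor's total downsampling factor, the product of the convolutional strides. A quick standalone check with the defaults above:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320: one output frame per 320 input samples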
| 70 | 1 |
def UpperCamelCase ( __lowercase : int ,__lowercase : list[int] ,__lowercase : int ):
'''simple docstring'''
def count_of_possible_combinations(__lowercase : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__lowercase )
def UpperCamelCase ( __lowercase : int ,__lowercase : list[int] ,__lowercase : int ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
__lowercase : int ,__lowercase : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A_ : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item ,__lowercase )
for item in array )
A_ : Any = answer
return answer
A_ : str = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__lowercase ,__lowercase )
def UpperCamelCase ( __lowercase : int ,__lowercase : list[int] ,__lowercase : int ):
'''simple docstring'''
A_ : Optional[int] = [0] * (target + 1)
A_ : Optional[Any] = 1
for i in range(1 ,target + 1 ):
for j in range(__lowercase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = 3
_UpperCAmelCase = 5
_UpperCAmelCase = [1, 2, 5]
print(combination_sum_iv(n, array, target))
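For clarity, the bottom-up variant restated under a hypothetical name; it counts ordered sequences, so 1+2+2 and 2+1+2 are distinct, which is why the answer for target 5 over [1, 2, 5] is 9 rather than 4:

def count_ordered_combinations(array: list[int], target: int) -> int:
    # dp[i] = number of ordered sequences drawn from `array` summing to i
    dp = [0] * (target + 1)
    dp[0] = 1  # the empty sequence
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp[target]

assert count_ordered_combinations([1, 2, 5], 5) == 9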
| 70 | import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase )
else:
A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase )
if hidden_sizes == 1_92:
A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase )
if hidden_sizes == 2_56:
A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase )
if hidden_sizes == 3_84:
A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase )
from_model.eval()
A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval()
A_ : Union[str, Any] = OrderedDict()
A_ : Dict = from_model.state_dict()
A_ : Tuple = list(from_model.state_dict().keys() )
A_ : str = list(our_model.state_dict().keys() )
print(len(__lowercase ) ,len(__lowercase ) )
for i in range(len(__lowercase ) ):
A_ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowercase )
A_ : str = torch.randn((2, 3, 2_24, 2_24) )
A_ : str = from_model(__lowercase )
A_ : Optional[Any] = our_model(__lowercase ).logits
assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one."
A_ : List[str] = name
print(__lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
A_ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ):
'''simple docstring'''
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : Optional[int] = 10_00
A_ : Optional[int] = (1, num_labels)
A_ : int = 'huggingface/label-files'
A_ : int = num_labels
A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
A_ : Any = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
A_ : Tuple = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
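The loop above copies weights purely by position, which only works because timm and the Hugging Face model enumerate their state dict keys in the same order. A minimal illustration of that positional remap (key names here are hypothetical):

import torch
from collections import OrderedDict

src = OrderedDict([("blocks.0.w", torch.ones(2)), ("blocks.0.b", torch.zeros(2))])
dst_keys = ["encoder.layer.0.weight", "encoder.layer.0.bias"]  # assumed: same order as src
remapped = OrderedDict(zip(dst_keys, src.values()))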
| 70 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(*lowercase , **lowercase )
self.check_model_type(lowercase )
def lowerCAmelCase_ ( self , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
"""simple docstring"""
A_ , A_ : List[Any] = {}, {}
if padding is not None:
A_ : Union[str, Any] = padding
if truncation is not None:
A_ : Optional[Any] = truncation
if top_k is not None:
A_ : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase , lowercase = None , **lowercase ):
"""simple docstring"""
if isinstance(lowercase , (Image.Image, str) ) and isinstance(lowercase , lowercase ):
A_ : Tuple = {'image': image, 'question': question}
else:
A_ : Union[str, Any] = image
A_ : str = super().__call__(lowercase , **lowercase )
return results
def lowerCAmelCase_ ( self , lowercase , lowercase=False , lowercase=False ):
"""simple docstring"""
A_ : Any = load_image(inputs['image'] )
A_ : Any = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase , truncation=lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors=self.framework )
model_inputs.update(lowercase )
return model_inputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.model(**lowercase )
return model_outputs
def lowerCAmelCase_ ( self , lowercase , lowercase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
A_ : str = self.model.config.num_labels
if self.framework == "pt":
A_ : int = model_outputs.logits.sigmoid()[0]
A_ , A_ : Optional[int] = probs.topk(lowercase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
A_ : List[str] = scores.tolist()
A_ : int = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
| 70 | def UpperCamelCase ( __lowercase : str ,__lowercase : int ):
'''simple docstring'''
A_ : int = word.split()
def justify(__lowercase : list ,__lowercase : int ,__lowercase : int ) -> str:
A_ : Optional[Any] = max_width - width
A_ : Union[str, Any] = len(__lowercase )
if len(__lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
A_ : Dict = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
A_ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A_ : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
A_ : Tuple = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
A_ : List[str] = []
A_ : list[str] = []
A_ : Dict = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase ,__lowercase ,__lowercase ) )
# reset new line and new width
A_ , A_ : Any = [word], len(__lowercase )
A_ : int = max_width - width - len(__lowercase )
answer.append(' '.join(__lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
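A worked example of the greedy full justification above, with max_width = 16: extra spaces are handed out round-robin to the leftmost gaps, and the final line is left-justified and padded:

# words: "This is an example of text justification."
# output:
#   "This    is    an"
#   "example  of text"
#   "justification.  "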
| 70 | 1 |
from manim import *
class UpperCAmelCase ( __A ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
A_ : List[Any] = Rectangle(height=0.25 , width=0.25 )
A_ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : Tuple = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : int = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 )
A_ : Optional[int] = Text('CPU' , font_size=2_4 )
A_ : Any = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase )
A_ : Any = [mem.copy() for i in range(4 )]
A_ : List[str] = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : int = Text('GPU' , font_size=2_4 )
A_ : Optional[int] = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
gpu.move_to([-1, -1, 0] )
self.add(lowercase )
A_ : Optional[Any] = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : int = Text('Model' , font_size=2_4 )
A_ : Any = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
model.move_to([3, -1.0, 0] )
self.add(lowercase )
A_ : Optional[int] = []
A_ : Union[str, Any] = []
A_ : Dict = []
for i, rect in enumerate(lowercase ):
rect.set_stroke(lowercase )
A_ : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowercase , buff=0.0 )
self.add(lowercase )
model_cpu_arr.append(lowercase )
self.add(*lowercase , *lowercase , *lowercase )
A_ : Union[str, Any] = [mem.copy() for i in range(6 )]
A_ : Tuple = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : int = Text('Loaded Checkpoint' , font_size=2_4 )
A_ : Union[str, Any] = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowercase )
A_ : Tuple = []
A_ : List[Any] = []
for i, rect in enumerate(lowercase ):
A_ : Tuple = fill.copy().set_fill(lowercase , opacity=0.7 )
target.move_to(lowercase )
ckpt_arr.append(lowercase )
A_ : Tuple = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowercase )
self.add(*lowercase , *lowercase )
A_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Tuple = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase , lowercase )
A_ : Any = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowercase )
A_ : Any = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
A_ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
A_ : List[Any] = [meta_mem.copy() for i in range(6 )]
A_ : Union[str, Any] = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : str = VGroup(*lowercase ).arrange(lowercase , buff=0 )
A_ : Any = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 )
A_ : int = Text('Disk' , font_size=2_4 )
A_ : Dict = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowercase , run_time=3 ) , Write(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) )
A_ : Optional[int] = []
for i, rect in enumerate(lowercase ):
A_ : str = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowercase , run_time=1.5 ) )
self.play(*lowercase )
self.play(FadeOut(lowercase ) )
A_ : Any = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase , run_time=3 ) )
self.play(
FadeOut(lowercase , lowercase , *lowercase , *lowercase ) , )
self.wait()
| 70 | import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='Task to run: summarization or translation.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
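A hypothetical invocation of the script above; only flags visible in this file or implied by the Lightning plumbing are shown, and the exact names are assumptions:

# python finetune.py \
#   --data_dir ./cnn_dm --output_dir ./out \
#   --do_predict --task summarization \
#   --n_val 500 --val_metric rouge2 --gpus 1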
| 70 | 1 |
import cmath
import math
def UpperCamelCase ( __lowercase : float ,__lowercase : float ,__lowercase : float ,__lowercase : float ):
'''simple docstring'''
A_ : Dict = math.radians(__lowercase )
A_ : Optional[int] = math.radians(__lowercase )
# Convert voltage and current to rectangular form
A_ : Optional[Any] = cmath.rect(__lowercase ,__lowercase )
A_ : List[str] = cmath.rect(__lowercase ,__lowercase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
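A quick numeric check of the function above. Note it forms S = V * I directly, so the phase angles add (the common engineering convention S = V * conj(I) would subtract them); with this snippet's convention, 100 V at 30 degrees and 5 A at -30 degrees give a purely real 500 VA:

import cmath
import math

s = cmath.rect(100, math.radians(30)) * cmath.rect(5, math.radians(-30))
print(round(s.real, 6), round(s.imag, 6))  # 500.0 0.0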
| 70 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 70 | def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = 0
while b > 0:
if b & 1:
A_ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
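The second function above is double-and-add multiplication with modular reduction at every step, the standard trick where a plain `a * b % c` would overflow fixed-width integers. A cleaned-up equivalent under a hypothetical name:

def binary_mod_multiply(a: int, b: int, c: int) -> int:
    res = 0
    a %= c
    while b > 0:
        if b & 1:  # this bit of b contributes a * 2**k
            res = (res + a) % c
        a = (a + a) % c  # advance to a * 2**(k + 1), kept reduced
        b >>= 1
    return res

assert binary_mod_multiply(123_456, 789_012, 1_000_003) == (123_456 * 789_012) % 1_000_003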
| 70 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''xlm-prophetnet'''
lowerCamelCase_ = ['''past_key_values''']
lowerCamelCase_ = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self , lowercase = 0.1 , lowercase = "gelu" , lowercase = 3_0_5_2_2 , lowercase = 1_0_2_4 , lowercase = 4_0_9_6 , lowercase = 1_2 , lowercase = 1_6 , lowercase = 4_0_9_6 , lowercase = 1_2 , lowercase = 1_6 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 5_1_2 , lowercase = 0.02 , lowercase = True , lowercase = True , lowercase = 0 , lowercase = 2 , lowercase = 3_2 , lowercase = 1_2_8 , lowercase = False , lowercase = 0.0 , lowercase = True , lowercase = 0 , lowercase = 1 , lowercase = 2 , **lowercase , ):
"""simple docstring"""
A_ : List[str] = vocab_size
A_ : Union[str, Any] = hidden_size
A_ : int = encoder_ffn_dim
A_ : Dict = num_encoder_layers
A_ : Dict = num_encoder_attention_heads
A_ : str = decoder_ffn_dim
A_ : str = num_decoder_layers
A_ : Any = num_decoder_attention_heads
A_ : List[Any] = max_position_embeddings
A_ : int = init_std # Normal(0, this parameter)
A_ : Optional[int] = activation_function
# parameters for xlmprophetnet
A_ : Optional[Any] = ngram
A_ : Optional[int] = num_buckets
A_ : List[Any] = relative_max_distance
A_ : str = disable_ngram_loss
A_ : int = eps
# 3 Types of Dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : Tuple = dropout
A_ : Optional[int] = use_cache
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , add_cross_attention=lowercase , decoder_start_token_id=lowercase , **lowercase , )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
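Note on the read-only property above: the hidden layer count is reported as the sum of the encoder and decoder stacks, and the setter raises on purpose, since the split must be configured per stack:

# With the defaults above: 12 encoder + 12 decoder layers
# config.num_hidden_layers == 24
# config.num_hidden_layers = 6  # raises NotImplementedError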
| 70 | def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if length <= 0 or not isinstance(__lowercase ,__lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
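The n-th hexagonal number is n * (2n - 1), and the list starts at n = 0, so:

# hexagonal_numbers(5) -> [0, 1, 6, 15, 28]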
| 70 | 1 |
import qiskit
def UpperCamelCase ( __lowercase : int ,__lowercase : int ):
'''simple docstring'''
A_ : List[Any] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
A_ : Union[str, Any] = qiskit.QuantumCircuit(__lowercase ,__lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] ,[0] )
# Execute the circuit on the simulator
A_ : List[str] = qiskit.execute(__lowercase ,__lowercase ,shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 70 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodeb, cost in edges:
adjacency[nodea].append([nodeb, cost] )
adjacency[nodeb].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
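For reference, a minimal heap-based Prim's that consumes the adjacency shape built above (node -> [[neighbor, cost], ...]); this is a sketch with an assumed edge-tuple interface, not the imported implementation:

import heapq

def prim_mst(adjacency: dict, start: int = 0) -> list[tuple[int, int, int]]:
    visited = {start}
    frontier = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier:
        cost, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # would close a cycle
        visited.add(v)
        mst.append((u, v, cost))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(frontier, (c, v, w))
    return mst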
| 70 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase_ ( *lowercase , **lowercase ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
A_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
A_ : int = image_classifier(lowercase , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowercase ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
A_ : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
] , )
@require_tf
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
A_ : Optional[Any] = image_classifier(lowercase , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(lowercase ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
A_ : str = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
[
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
{'score': 0.333, 'label': ANY(lowercase )},
],
] , )
@slow
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
A_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
A_ : Union[str, Any] = image_classifier(lowercase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowercase ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
A_ : Optional[int] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
A_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
A_ : Union[str, Any] = image_classifier(lowercase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowercase ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
A_ : str = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
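A hedged usage sketch matching the slow tests above, using the same public checkpoint they load:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
# -> [{"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, ...]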
| 70 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
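Typical invocations of the CLI assembled above, one per registered sub-parser (the TPU helper is omitted since its exact subcommand name is not shown here):

# accelerate config            # interactive configuration wizard
# accelerate env               # print environment info
# accelerate launch train.py   # launch a script with the saved config
# accelerate test              # sanity-check the saved config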
| 70 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_UpperCAmelCase = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
A_ : Optional[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
A_ : Optional[Any] = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(lowercase ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
A_ : Union[str, Any] = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
A_ : int = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
# Legacy behavior
A_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=lowercase )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
A_ : Tuple = text_classifier('This is great !' , return_all_scores=lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
A_ : Optional[int] = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
A_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [
{'label': 'LABEL_0', 'score': 0.504},
{'label': 'LABEL_0', 'score': 0.504},
] , )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
import torch
A_ : Optional[int] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
A_ : int = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@require_tf
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
A_ : Optional[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@slow
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = pipeline('text-classification' )
A_ : List[str] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'POSITIVE', 'score': 1.0}] )
A_ : Any = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
A_ : Optional[Any] = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'POSITIVE', 'score': 0.988}] )
@slow
@require_tf
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = pipeline('text-classification' , framework='tf' )
A_ : Any = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'POSITIVE', 'score': 1.0}] )
A_ : Optional[Any] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
A_ : int = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(lowercase ) , [{'label': 'POSITIVE', 'score': 0.988}] )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = TextClassificationPipeline(model=lowercase , tokenizer=lowercase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
A_ : Tuple = 'HuggingFace is in'
A_ : Union[str, Any] = text_classifier(lowercase )
self.assertEqual(nested_simplify(lowercase ) , [{'label': ANY(lowercase ), 'score': ANY(lowercase )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
A_ : Any = ['HuggingFace is in ', 'Paris is in France']
A_ : Any = text_classifier(lowercase )
self.assertEqual(
nested_simplify(lowercase ) , [{'label': ANY(lowercase ), 'score': ANY(lowercase )}, {'label': ANY(lowercase ), 'score': ANY(lowercase )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
A_ : List[Any] = text_classifier(lowercase , top_k=lowercase )
A_ : Optional[Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase ) , [[{'label': ANY(lowercase ), 'score': ANY(lowercase )}] * N, [{'label': ANY(lowercase ), 'score': ANY(lowercase )}] * N] , )
A_ : str = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
A_ : Tuple = text_classifier(lowercase )
self.assertEqual(
nested_simplify(lowercase ) , {'label': ANY(lowercase ), 'score': ANY(lowercase )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
        # This might be used as a text pair, but the tokenizer + pipeline interaction
        # makes it hard to see that the pair is not actually being used properly.
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was producing wrong outputs.
A_ : Dict = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(lowercase ):
text_classifier(lowercase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
A_ : str = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(lowercase ) , [{'label': ANY(lowercase ), 'score': ANY(lowercase )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
| 70 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
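# Note: the layout above is the standard lazy-import pattern used across the
# library: the module object is replaced by a _LazyModule, so the heavy modeling
# submodule is only imported on first attribute access, while the TYPE_CHECKING
# branch keeps static type checkers and IDEs aware of the real symbols.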
| 70 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
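# The function above is the Miller-Rabin probabilistic primality test: it factors
# num - 1 as 2**t * s with s odd, then for five random witnesses a checks the
# sequence a**s, a**(2*s), ... (mod num). A prime must yield 1 immediately or
# pass through num - 1 before the exponent reaches num - 1; a composite survives
# one random witness with probability at most 1/4, so five rounds bound the
# false-positive rate by about 4**-5.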
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
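# The generator above draws uniform candidates in [2**(keysize - 1), 2**keysize)
# and retries until one passes the primality check. By the prime number theorem,
# a random 1024-bit integer is prime with probability roughly 1 / ln(2**1024),
# i.e. about 1 in 710, so only a few hundred draws are expected on average.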
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
_UpperCAmelCase = 300 # TEMPERATURE (unit = K)
def UpperCamelCase ( __lowercase : float ,__lowercase : float ,__lowercase : float ,):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
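# For reference, the expression above is the built-in potential of a p-n junction,
# V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2); dividing by the electron-volt
# constant converts the Boltzmann term from joules to volts. A worked example
# with illustrative silicon-like magnitudes (numbers are not from this file):
#   donor_conc = acceptor_conc = 1e17, intrinsic_conc = 1e10  (per cm^3)
#   => V_bi = 0.02585 * ln(1e14), i.e. about 0.83 V at T = 300 K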
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , **lowercase ):
"""simple docstring"""
A_ : int = feature_size
A_ : List[Any] = sampling_rate
A_ : Dict = padding_value
A_ : List[Any] = kwargs.pop('padding_side' , 'right' )
A_ : str = kwargs.pop('return_attention_mask' , lowercase )
super().__init__(**lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ):
"""simple docstring"""
if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A_ : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
A_ : Union[str, Any] = processed_features[self.model_input_names[0]]
A_ : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase ) == 0:
if return_attention_mask:
A_ : Optional[Any] = []
return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them to NumPy arrays
        # and rebuild them afterwards if no return_tensors is specified.
        # Note that we lose the specific device the tensor may be on for PyTorch.
A_ : Optional[Any] = required_input[0]
if isinstance(lowercase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
A_ : str = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase ):
A_ : List[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase ):
A_ : List[str] = 'tf'
elif is_torch_tensor(lowercase ):
A_ : Tuple = 'pt'
elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ):
A_ : str = 'np'
else:
raise ValueError(
F'''type of {first_element} unknown: {type(lowercase )}. '''
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A_ : Dict = to_numpy(lowercase )
else:
A_ : str = [to_numpy(lowercase ) for v in value]
# Convert padding_strategy in PaddingStrategy
A_ : List[Any] = self._get_padding_strategies(padding=lowercase , max_length=lowercase )
A_ : Union[str, Any] = processed_features[self.model_input_names[0]]
A_ : List[str] = len(lowercase )
if not all(len(lowercase ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A_ : List[str] = []
for i in range(lowercase ):
A_ : Optional[int] = {k: v[i] for k, v in processed_features.items()}
# truncation
A_ : Tuple = self._truncate(
lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , )
truncated_inputs.append(lowercase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A_ : int = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A_ : Optional[Any] = PaddingStrategy.MAX_LENGTH
A_ : List[str] = {}
for i in range(lowercase ):
# padding
A_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
for key, value in outputs.items():
if key not in batch_outputs:
A_ : Optional[int] = []
if value.dtype is np.dtype(np.floataa ):
A_ : Any = value.astype(np.floataa )
batch_outputs[key].append(lowercase )
return BatchFeature(lowercase , tensor_type=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ):
"""simple docstring"""
A_ : str = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A_ : List[Any] = len(lowercase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A_ : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A_ : List[Any] = np.ones(len(lowercase ) , dtype=np.intaa )
if needs_to_be_padded:
A_ : int = max_length - len(lowercase )
if self.padding_side == "right":
if return_attention_mask:
A_ : Union[str, Any] = np.pad(
processed_features['attention_mask'] , (0, difference) )
A_ : int = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A_ : str = np.pad(
lowercase , lowercase , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A_ : Any = np.pad(
processed_features['attention_mask'] , (difference, 0) )
A_ : List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A_ : Union[str, Any] = np.pad(
lowercase , lowercase , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A_ : Any = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A_ : Optional[int] = len(lowercase ) > max_length
if needs_to_be_truncated:
A_ : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A_ : Dict = processed_features['attention_mask'][:max_length]
return processed_features
def lowerCAmelCase_ ( self , lowercase=False , lowercase=None ):
"""simple docstring"""
if padding is not False:
if padding is True:
A_ : Union[str, Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase , lowercase ):
A_ : Optional[int] = PaddingStrategy(lowercase )
elif isinstance(lowercase , lowercase ):
A_ : str = padding
else:
A_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
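# A minimal sketch of the padding contract implemented above, assuming the usual
# public `pad` entry point of a feature extractor (values are illustrative):
#   feats = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#                         padding="longest", return_tensors="np")
#   # every row is padded with `padding_value` to the longest length (3), and an
#   # `attention_mask` of ones/zeros marks real vs. padded positions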
| 70 | import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 4
A_ : int = 3
A_ : List[str] = (3_2, 3_2)
A_ : Any = jax.random.PRNGKey(0 )
A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
A_ : int = self.dummy_input
return init_dict, inputs_dict
| 70 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = StableUnCLIPImgaImgPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ = frozenset(
        [] )  # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase_ = frozenset([] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = 3_2
A_ : Tuple = embedder_hidden_size
# image encoding components
A_ : Optional[Any] = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
torch.manual_seed(0 )
A_ : Tuple = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowercase , projection_dim=lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
A_ : int = StableUnCLIPImageNormalizer(embedding_dim=lowercase )
A_ : Optional[Any] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A_ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A_ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase , layers_per_block=1 , upcast_attention=lowercase , use_linear_projection=lowercase , )
torch.manual_seed(0 )
A_ : Dict = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=lowercase , steps_offset=1 , )
torch.manual_seed(0 )
A_ : Dict = AutoencoderKL()
A_ : Dict = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowerCAmelCase_ ( self , lowercase , lowercase=0 , lowercase=True ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : Union[str, Any] = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
if pil_image:
A_ : Any = input_image * 0.5 + 0.5
A_ : str = input_image.clamp(0 , 1 )
A_ : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A_ : int = DiffusionPipeline.numpy_to_pil(lowercase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Tuple = self.get_dummy_components()
A_ : Optional[int] = StableUnCLIPImgaImgPipeline(**lowercase )
A_ : int = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[Any] = self.get_dummy_inputs(lowercase )
inputs.update({'image_embeds': None} )
A_ : Optional[int] = sd_pipe(**lowercase ).images
A_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
A_ : Dict = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=lowercase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
A_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
A_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Optional[Any] = pipe(lowercase , 'anime turle' , generator=lowercase , output_type='np' )
A_ : Any = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
A_ : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
A_ : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : str = pipe(lowercase , 'anime turle' , generator=lowercase , output_type='np' )
A_ : int = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ : Optional[Any] = pipe(
lowercase , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
A_ : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 70 | import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
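# The 5x5 grid above is a Polybius square with 'j' folded into 'i' so the
# 26-letter alphabet fits in 25 cells. The class below implements the Bifid
# cipher: encode() writes each letter's (row, column) pair into a 2 x n array,
# flattens it so all row indices precede all column indices, and reads the
# flattened stream back two at a time as fresh square coordinates; decode()
# performs the inverse transposition.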
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Any = np.array(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
        A_ : Tuple = message.replace(' ' , '' )  # assign the result: str.replace does not mutate in place
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
| 70 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''conditional_detr'''
lowerCamelCase_ = ['''past_key_values''']
lowerCamelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase=True , lowercase=None , lowercase=3 , lowercase=3_0_0 , lowercase=6 , lowercase=2_0_4_8 , lowercase=8 , lowercase=6 , lowercase=2_0_4_8 , lowercase=8 , lowercase=0.0 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=2_5_6 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , lowercase=False , lowercase="sine" , lowercase="resnet50" , lowercase=True , lowercase=False , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=1 , lowercase=1 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.25 , **lowercase , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A_ : Union[str, Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(lowercase , lowercase ):
A_ : int = backbone_config.get('model_type' )
A_ : Any = CONFIG_MAPPING[backbone_model_type]
A_ : Tuple = config_class.from_dict(lowercase )
A_ : Optional[int] = use_timm_backbone
A_ : Dict = backbone_config
A_ : List[str] = num_channels
A_ : Dict = num_queries
A_ : Dict = d_model
A_ : str = encoder_ffn_dim
A_ : Optional[Any] = encoder_layers
A_ : str = encoder_attention_heads
A_ : Optional[int] = decoder_ffn_dim
A_ : Optional[int] = decoder_layers
A_ : List[Any] = decoder_attention_heads
A_ : Dict = dropout
A_ : Any = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : Dict = activation_function
A_ : Optional[int] = init_std
A_ : Dict = init_xavier_std
A_ : Any = encoder_layerdrop
A_ : str = decoder_layerdrop
A_ : int = encoder_layers
A_ : str = auxiliary_loss
A_ : Optional[int] = position_embedding_type
A_ : List[str] = backbone
A_ : int = use_pretrained_backbone
A_ : Dict = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Optional[Any] = giou_cost
# Loss coefficients
A_ : Tuple = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : List[str] = cls_loss_coefficient
A_ : Union[str, Any] = bbox_loss_coefficient
A_ : Union[str, Any] = giou_loss_coefficient
A_ : Optional[Any] = focal_alpha
super().__init__(is_encoder_decoder=lowercase , **lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.d_model
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A_ : List[Any] = self.backbone_config.to_dict()
A_ : Union[str, Any] = self.__class__.model_type
return output
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = version.parse('''1.11''' )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_2
| 70 | from math import sqrt
def UpperCamelCase ( __lowercase : int = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowercase ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
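# Counting rationale for the loop above (Project Euler 86): for a cuboid whose
# longest side is M and whose two shorter sides sum to s (2 <= s <= 2*M), the
# shortest surface path between opposite corners has length sqrt(M**2 + s**2).
# When that length is an integer, the number of distinct (a, b) splits of s with
# a <= b <= M is min(M, s // 2) - max(1, s - M) + 1, which is exactly the
# increment applied to the running count.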
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 1 |
from math import sqrt
def UpperCamelCase ( __lowercase : int = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowercase ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ):
"""simple docstring"""
super().__init__()
A_ : Tuple = initial_learning_rate
A_ : List[str] = warmup_steps
A_ : int = power
A_ : Dict = decay_schedule_fn
A_ : Any = name
def __call__( self , lowercase ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup: if global_step < warmup_steps, the
            # learning rate is `init_lr * (global_step / num_warmup_steps) ** power`.
A_ : Optional[int] = tf.cast(lowercase , tf.floataa )
A_ : int = tf.cast(self.warmup_steps , tf.floataa )
A_ : Optional[int] = global_step_float / warmup_steps_float
A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,):
'''simple docstring'''
A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,)
if num_warmup_steps:
A_ : Tuple = WarmUp(
initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,)
if weight_decay_rate > 0.0:
A_ : Union[str, Any] = AdamWeightDecay(
learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,)
else:
A_ : Dict = tf.keras.optimizers.Adam(
learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
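# Hypothetical usage sketch for the factory above (identifier names follow the
# public `create_optimizer` API and are illustrative here):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000,
#       weight_decay_rate=0.01)
#   model.compile(optimizer=optimizer)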
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ):
"""simple docstring"""
super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
A_ : Dict = weight_decay_rate
A_ : Union[str, Any] = include_in_weight_decay
A_ : str = exclude_from_weight_decay
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : Tuple = {'WarmUp': WarmUp}
return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase )
A_ : Optional[Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ):
"""simple docstring"""
A_ , A_ : Optional[int] = list(zip(*lowercase ) )
return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A_ : List[str] = apply_state or {}
A_ : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A_ : Dict = self._fallback_apply_state(lowercase , lowercase )
A_ : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return False
return True
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : int = []
A_ : Optional[int] = None
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self._accum_steps is None:
A_ : int = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowercase ):
"""simple docstring"""
if not self._gradients:
A_ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowercase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' )
for accum_gradient, gradient in zip(self._gradients , lowercase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowercase )
self._accum_steps.assign_add(1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowercase ) )
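# A minimal accumulation-loop sketch for the class above, assuming the usual
# GradientAccumulator semantics (names are illustrative, not from this file):
#   accumulator = GradientAccumulator()
#   for step, grads in enumerate(gradient_stream):
#       accumulator(grads)                      # add this step's gradients
#       if (step + 1) % accum_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, variables))
#           accumulator.reset()                 # zero the buffers for the next window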
| 70 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = GPTaTokenizer
lowerCamelCase_ = GPTaTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = {'''add_prefix_space''': True}
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
A_ : Dict = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A_ : List[str] = {'unk_token': '<unk>'}
A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase ) )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = 'lower newer'
A_ : Optional[int] = 'lower newer'
return input_text, output_text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A_ : Any = 'lower newer'
A_ : Optional[int] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
A_ : Optional[int] = tokenizer.tokenize(lowercase , add_prefix_space=lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : List[Any] = tokens + [tokenizer.unk_token]
A_ : Any = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ : List[Any] = self.get_tokenizer()
A_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase )
A_ : Dict = 'lower newer'
# Testing tokenization
A_ : int = tokenizer.tokenize(lowercase , add_prefix_space=lowercase )
A_ : List[str] = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids without special tokens
A_ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
A_ : Tuple = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids with special tokens
A_ : Any = self.get_rust_tokenizer(add_prefix_space=lowercase )
A_ : Any = tokenizer.encode(lowercase , add_prefix_space=lowercase )
A_ : int = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing the unknown token
A_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
A_ : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def lowerCAmelCase_ ( self , *lowercase , **lowercase ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self , lowercase=1_5 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : Any = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
# Simple input
A_ : List[str] = 'This is a simple input'
A_ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
A_ : Optional[int] = ('This is a simple input', 'This is a pair')
A_ : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' )
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' )
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' )
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
A_ : Any = 'This is a simple input'
A_ : Dict = ['This is a simple input looooooooong', 'This is a simple input']
A_ : Optional[Any] = ('This is a simple input', 'This is a pair')
A_ : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A_ : Any = tokenizer.pad_token_id
A_ : Optional[Any] = tokenizer(lowercase , padding='max_length' , max_length=3_0 , return_tensors='np' )
A_ : List[str] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' )
A_ : Optional[int] = tokenizer(*lowercase , padding='max_length' , max_length=6_0 , return_tensors='np' )
A_ : List[Any] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = '$$$'
A_ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase , add_bos_token=lowercase )
A_ : Dict = 'This is a simple input'
A_ : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
A_ : List[Any] = tokenizer.bos_token_id
A_ : List[Any] = tokenizer(lowercase )
A_ : Optional[int] = tokenizer(lowercase )
self.assertEqual(out_s.input_ids[0] , lowercase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A_ : Dict = tokenizer.decode(out_s.input_ids )
A_ : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowercase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = [self.get_tokenizer(do_lower_case=lowercase , add_bos_token=lowercase )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A_ : Dict = 'Encode this.'
A_ : Any = 'This one too please.'
A_ : str = tokenizer.encode(lowercase , add_special_tokens=lowercase )
encoded_sequence += tokenizer.encode(lowercase , add_special_tokens=lowercase )
A_ : Optional[int] = tokenizer.encode_plus(
lowercase , lowercase , add_special_tokens=lowercase , return_special_tokens_mask=lowercase , )
A_ : Union[str, Any] = encoded_sequence_dict['input_ids']
A_ : Any = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(lowercase ) , len(lowercase ) )
A_ : Optional[int] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase )
]
A_ : Optional[int] = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowercase , lowercase )
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowercase )
A_ : Union[str, Any] = 'A photo of a cat'
A_ : Optional[int] = tokenizer.encode(
lowercase , )
self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('test_opt' )
A_ : List[str] = AutoTokenizer.from_pretrained('./test_opt' )
A_ : Union[str, Any] = tokenizer.encode(
lowercase , )
self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=lowercase )
A_ : Tuple = 'A photo of a cat'
A_ : Dict = tokenizer.encode(
lowercase , )
# Same as above
self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowercase )
A_ : Optional[Any] = 'bos'
A_ : List[Any] = tokenizer.get_vocab()['bos']
A_ : Any = 'A photo of a cat'
A_ : int = tokenizer.encode(
lowercase , )
# We changed the bos token
self.assertEqual(lowercase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('./tok' )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
A_ : Optional[int] = tokenizer.encode(
lowercase , )
self.assertEqual(lowercase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
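# Cross-framework checks: each checkpoint is loaded in one framework and re-loaded
# in the other via the `from_pt` / `from_tf` flags of `from_pretrained`.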
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : Any = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
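# With `output_loading_info=True`, `from_pretrained` returns a (model, loading_info) tuple.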
A_ , A_ = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
| 70 | 1 |