| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> int:
super().__init__(_a )
_A : Optional[Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Any:
super().__init__(_a )
_A : List[str] = config.num_labels
_A : Optional[Any] = config.num_hidden_layers
_A : List[Any] = DeeRobertaModel(_a )
_A : str = nn.Dropout(config.hidden_dropout_prob )
_A : Optional[int] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> List[str]:
_A : Union[str, Any] = self.num_layers
try:
_A : Dict = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_A : Union[str, Any] = outputs[1]
_A : List[Any] = self.dropout(_a )
_A : Any = self.classifier(_a )
_A : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_A : List[Any] = e.message
_A : Any = e.exit_layer
_A : List[str] = outputs[0]
if not self.training:
_A : Optional[int] = entropy(_a )
_A : List[Any] = []
_A : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_A : Tuple = MSELoss()
_A : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[str] = CrossEntropyLoss()
_A : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_A : Optional[Any] = []
for highway_exit in outputs[-1]:
_A : str = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_A : str = MSELoss()
_A : List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_A : int = CrossEntropyLoss()
_A : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_A : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_A : Optional[int] = (loss,) + outputs
if not self.training:
_A : int = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_A : List[str] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
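The early-exit decision in DeeBERT-style models hinges on the entropy of a highway classifier's logits: a peaked (low-entropy) distribution means the layer is confident enough to exit. A minimal standalone sketch of that criterion (illustrative only; the model above imports its own `entropy` from `modeling_highway_bert`):

```python
import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution over classes.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

# A peaked distribution has low entropy, so this layer would take the
# early exit for any threshold above roughly 0.2.
logits = torch.tensor([[4.0, 0.1, 0.1]])
print(entropy(logits))  # tensor([0.1914])
```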
| 54 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
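For orientation, the API these tests exercise, in plain form (a minimal sketch; assumes the `datasets` library is installed):

```python
from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
dset = Dataset.from_list(records)
print(dset.column_names)  # ['col_1', 'col_2']
print(dset[0])            # {'col_1': 3, 'col_2': 'a'}
```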
| 54 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_snake_case = logging.get_logger(__name__)
_snake_case = TypeVar("DatasetType", Dataset, IterableDataset)
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = "first_exhausted",):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(snake_case_ ):
if not isinstance(snake_case_,(Dataset, IterableDataset) ):
if isinstance(snake_case_,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
"""is an empty dataset dictionary.""" )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(snake_case_ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case_ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case_ ).__name__}.''' )
if i == 0:
_A , _A : Optional[int] = (
(Dataset, IterableDataset) if isinstance(snake_case_,snake_case_ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case_,snake_case_ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
snake_case_,snake_case_,snake_case_,info=snake_case_,split=snake_case_,stopping_strategy=snake_case_ )
else:
return _interleave_iterable_datasets(
snake_case_,snake_case_,snake_case_,info=snake_case_,split=snake_case_,stopping_strategy=snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = None,snake_case_ = 0,):
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(snake_case_ ):
if not isinstance(snake_case_,(Dataset, IterableDataset) ):
if isinstance(snake_case_,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
"""is an empty dataset dictionary.""" )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(snake_case_ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case_ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case_ ).__name__}.''' )
if i == 0:
_A , _A : List[Any] = (
(Dataset, IterableDataset) if isinstance(snake_case_,snake_case_ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case_,snake_case_ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(snake_case_,info=snake_case_,split=snake_case_,axis=snake_case_ )
else:
return _concatenate_iterable_datasets(snake_case_,info=snake_case_,split=snake_case_,axis=snake_case_ )
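These two functions back the public `interleave_datasets` and `concatenate_datasets` helpers; a minimal usage sketch (dataset contents are illustrative):

```python
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

# Alternate one example from each dataset until the first is exhausted.
print(interleave_datasets([d1, d2])["a"])   # [0, 10, 1, 11, 2, 12]

# Stack the rows of both datasets (axis=0 by default).
print(concatenate_datasets([d1, d2])["a"])  # [0, 1, 2, 10, 11, 12]
```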
| 54 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != listb[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
int(x )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
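The first helper in this file merges two minterm strings for the Quine-McCluskey reduction; in readable form (names here are illustrative, not taken from the file):

```python
from __future__ import annotations

def compare_string(string1: str, string2: str) -> str | bool:
    """Merge two equal-length binary strings that differ in at most one bit.

    The differing bit is replaced by "_"; returns False if the strings
    differ in more than one position.
    """
    chars = list(string1)
    count = 0
    for i in range(len(chars)):
        if chars[i] != string2[i]:
            count += 1
            chars[i] = "_"
    return False if count > 1 else "".join(chars)

assert compare_string("0110", "0111") == "011_"
assert compare_string("0110", "1001") is False
```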
| 54 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be trained."} )
_a = field(
default="./",metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_a = field(
default="codeparrot/codeparrot-clean-train",metadata={"help": "Name or path of training dataset."} )
_a = field(
default="codeparrot/codeparrot-clean-valid",metadata={"help": "Name or path of validation dataset."} )
_a = field(default=2,metadata={"help": "Batch size for training."} )
_a = field(default=2,metadata={"help": "Batch size for evaluation."} )
_a = field(default=0.1,metadata={"help": "Value of weight decay."} )
_a = field(
default=1_0_0_0_0,metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
_a = field(default=2e-4,metadata={"help": "Learning rate for training."} )
_a = field(default="cosine",metadata={"help": "Learning rate scheduler type."} )
_a = field(
default=7_5_0,metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_a = field(
default=1_6,metadata={"help": "Number of gradient accumulation steps."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_a = field(default=5_0_0_0_0,metadata={"help": "Maximum number of training steps."} )
_a = field(
default=-1,metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a = field(default=1_0_2_4,metadata={"help": "Sequence lengths used for training."} )
_a = field(default=1,metadata={"help": "Training seed."} )
_a = field(
default=1_0_2_4,metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_a = field(default=UpperCamelCase__,metadata={"help": "If True the data is pretokenized."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be evaluated."} )
_a = field(
default="codeparrot/codeparrot-clean-valid",metadata={"help": "Name or path of validation dataset."} )
_a = field(default=2,metadata={"help": "Batch size used for evaluation."} )
_a = field(
default=-1,metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a = field(default=1_0_2_4,metadata={"help": "Length of sequences to be evaluated."} )
_a = field(default=1,metadata={"help": "Random seed used for evaluation."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be evaluated."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Number of workers used for code evaluation."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Sample from the language model's output distribution."} )
_a = field(default=0.2,metadata={"help": "Sampling temperature used for generation."} )
_a = field(default=2_5_6,metadata={"help": "Maximum number of newly generated tokens."} )
_a = field(default=0,metadata={"help": "Top-k parameter used for generation."} )
_a = field(default=0.95,metadata={"help": "Top-p parameter used for nucleus sampling."} )
_a = field(default=1_0,metadata={"help": "Number of generations to run in parallel."} )
_a = field(
default=2_0_0,metadata={"help": "Number of completions to generate for each sample."} )
_a = field(default=1,metadata={"help": "Random seed used for evaluation."} )
_a = field(
default="eval_results.json",metadata={"help": "Random seed used for evaluation."} )
_a = field(
default="0",metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_a = field(
default=-1,metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
},)
@dataclass
class lowercase :
_a = field(
default=UpperCamelCase__,metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
},)
_a = field(
default="transformersbook/codeparrot",metadata={"help": "Folder or name of dataset to process."} )
_a = field(
default="codeparrot-clean",metadata={"help": "Folder to save processed processed dataset."} )
_a = field(
default=1_0_0_0_0_0,metadata={"help": "Number of files to save per JSON output file."} )
_a = field(default="content",metadata={"help": "Column containing text data to process."} )
_a = field(
default=1_0_0_0,metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_a = field(
default=1_0_0,metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_a = field(
default=0.25,metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_a = field(
default=1.5,metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_a = field(
default=0.7,metadata={"help": "Probability for filtering config, test and uncommon files."} )
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Name or path to the tokenizer."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "If True, near-duplicate samples are removed."} )
_a = field(
default=0.85,metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class lowercase :
_a = field(
default="gpt2",metadata={"help": "Base tokenizer to build new tokenizer from."} )
_a = field(
default="transformersbook/codeparrot-train",metadata={"help": "Dataset to train tokenizer on."} )
_a = field(default="content",metadata={"help": "Column containing text data to process."} )
_a = field(default=2_0_0_0_0_0,metadata={"help": "Number of examples to train tokenizer on."} )
_a = field(
default=3_2_7_6_8,metadata={"help": "Vocabulary size of the new tokenizer."} )
_a = field(default="codeparrot",metadata={"help": "Name of new tokenizer."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Name or path to the tokenizer."} )
_a = field(
default="codeparrot/codeparrot-clean-train",metadata={"help": "Name or path to the dataset to pretokenize."} )
_a = field(
default="tokenized-codeparrot-train",metadata={"help": "Repo name of the pretokenized data."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class lowercase :
_a = field(
default="gpt2-large",metadata={"help": "Configuration to use for model initialization."} )
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Tokenizer attached to model."} )
_a = field(default="codeparrot",metadata={"help": "Name of the created model."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Push saved tokenizer to the hub."} )
| 54 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
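A smaller hand-checkable case of the same tabulation (run alongside the definitions above; `all_construct` is the name used in the `__main__` block):

```python
# 'ab' can be built either as one word or as two:
print(all_construct("ab", ["a", "b", "ab"]))  # [['ab'], ['a', 'b']]
```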
| 54 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
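The optimizer setup above uses the standard weight-decay grouping, where biases and LayerNorm weights are exempt from decay; a minimal standalone sketch of that pattern:

```python
import torch
from torch import nn

model = nn.Linear(4, 2)
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.1,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=2e-4)
```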
| 54 |
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
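To see why the asserts hold, here is the trace for `[4, 3, 5, 1, 2]`: each pass peels one increasing strand off the remaining items and merges it into the solution.

```python
# Pass 1: strand [4, 5]  -> solution [4, 5]        (remaining [3, 1, 2])
# Pass 2: strand [3]     -> solution [3, 4, 5]     (remaining [1, 2])
# Pass 3: strand [1, 2]  -> solution [1, 2, 3, 4, 5]
```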
| 54 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "roberta"
def __init__( self , _a=5_0265 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ) -> List[str]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Dict = vocab_size
_A : str = hidden_size
_A : List[Any] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Union[str, Any] = hidden_act
_A : List[str] = intermediate_size
_A : int = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : List[Any] = max_position_embeddings
_A : Union[str, Any] = type_vocab_size
_A : Optional[Any] = initializer_range
_A : int = layer_norm_eps
_A : List[str] = position_embedding_type
_A : List[Any] = use_cache
_A : Dict = classifier_dropout
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
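A minimal sketch of the config class in use (assumes `transformers` is installed; the defaults match the `__init__` signature above):

```python
from transformers import RobertaConfig

config = RobertaConfig()          # vocab_size=50265, hidden_size=768, ...
print(config.num_hidden_layers)   # 12

small = RobertaConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
print(small.hidden_size)          # 256
```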
| 54 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
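For orientation, the pipeline these tests exercise, in plain form (a minimal sketch; the checkpoint follows the slow tests above, and the scores printed are whatever the model assigns):

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
for pred in unmasker("The largest city in France is <mask>."):
    print(pred["token_str"], round(pred["score"], 3))
```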
| 54 | 1 |
class lowercase :
def __init__( self , _a , _a , _a ) -> Optional[int]:
_A : Optional[int] = None
_A : Any = None
_A : List[str] = graph
self._normalize_graph(_a , _a )
_A : Tuple = len(_a )
_A : Dict = None
def a__ ( self , _a , _a ) -> int:
if isinstance(sources , int ):
_A : Optional[Any] = [sources]
if isinstance(sinks , int ):
_A : str = [sinks]
if len(_a ) == 0 or len(_a ) == 0:
return
_A : Any = sources[0]
_A : Any = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_a ) > 1 or len(_a ) > 1:
_A : int = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A : int = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A : List[str] = max_input_flow
_A : Any = 0
_A : Dict = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A : List[Any] = max_input_flow
_A : Tuple = size - 1
def a__ ( self ) -> int:
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a__ ( self , _a ) -> Optional[Any]:
_A : Union[str, Any] = algorithm(self )
class lowercase :
def __init__( self , _a ) -> List[str]:
_A : str = flow_network
_A : int = flow_network.verticesCount
_A : Optional[Any] = flow_network.sourceIndex
_A : List[str] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A : List[str] = flow_network.graph
_A : Tuple = False
def a__ ( self ) -> Union[str, Any]:
if not self.executed:
self._algorithm()
_A : List[Any] = True
def a__ ( self ) -> Dict:
pass
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> Dict:
super().__init__(_a )
# use this to save your result
_A : str = -1
def a__ ( self ) -> Tuple:
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[Any]:
super().__init__(_a )
_A : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A : str = [0] * self.verticies_count
_A : Any = [0] * self.verticies_count
def a__ ( self ) -> Any:
_A : str = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A : Tuple = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A : List[Any] = 0
while i < len(_a ):
_A : List[str] = vertices_list[i]
_A : str = self.heights[vertex_index]
self.process_vertex(_a )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_a ) )
_A : int = 0
else:
i += 1
_A : List[str] = sum(self.preflow[self.source_index] )
def a__ ( self , _a ) -> Tuple:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_a , _a )
self.relabel(_a )
def a__ ( self , _a , _a ) -> Tuple:
_A : str = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A : Optional[int] = self.heights[to_index]
if min_height is not None:
_A : str = min_height + 1
if __name__ == "__main__":
_snake_case = [0]
_snake_case = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
_snake_case = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
_snake_case = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
_snake_case = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 54 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lowercases letters
pass
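# Illustrative sketch (an addition, not part of the suite): what the offset-mapping test
# above asserts, using an assumed fast CLIP checkpoint name.
# from transformers import CLIPTokenizerFast
# tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
# enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# print(enc.offset_mapping)  # [(0, 5), (6, 11)]: spans index into the raw input string
# enc = tok(" hello hello", return_offsets_mapping=True, add_special_tokens=False)
# print(enc.offset_mapping)  # [(1, 6), (7, 12)]: a leading space shifts every span by one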
| 54
| 1
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
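# Minimal usage sketch (an addition): the processor resizes and normalizes a PIL image to
# the configured size; shapes follow the (18, 18) defaults used by the tester above.
# import numpy as np
# from PIL import Image
# from transformers import DPTImageProcessor
# processor = DPTImageProcessor(size={"height": 18, "width": 18})
# image = Image.fromarray(np.random.randint(0, 256, (30, 30, 3), dtype=np.uint8))
# pixel_values = processor(image, return_tensors="pt").pixel_values  # (1, 3, 18, 18)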
| 54
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda i : i.created_at,reverse=True )
_A : Dict = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
lowerCAmelCase_()
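# Usage sketch (an addition; the script path and token scope are assumptions):
# GITHUB_TOKEN=<token-with-repo-scope> python scripts/stale.py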
| 54
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase ( UpperCamelCase__ ):
_a = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , _a = True , **_a , ) -> None:
super().__init__(**_a )
_A : Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
_A : Optional[Any] = get_size_dict(_a , default_to_square=_a )
_A : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_A : List[str] = get_size_dict(_a , default_to_square=_a , param_name="""crop_size""" )
_A : List[str] = do_resize
_A : Optional[Any] = size
_A : List[Any] = resample
_A : str = do_center_crop
_A : int = crop_size
_A : Optional[Any] = do_rescale
_A : List[Any] = rescale_factor
_A : Optional[Any] = do_normalize
_A : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_A : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
_A : Union[str, Any] = do_convert_rgb
def a__ ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_A : Tuple = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_A : Optional[int] = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
_A : Tuple = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a , ) -> Optional[int]:
return rescale(_a , scale=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def a__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_A : Optional[int] = do_resize if do_resize is not None else self.do_resize
_A : Dict = size if size is not None else self.size
_A : List[str] = get_size_dict(_a , param_name="""size""" , default_to_square=_a )
_A : List[str] = resample if resample is not None else self.resample
_A : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : int = crop_size if crop_size is not None else self.crop_size
_A : List[Any] = get_size_dict(_a , param_name="""crop_size""" , default_to_square=_a )
_A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_A : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_A : Any = image_mean if image_mean is not None else self.image_mean
_A : List[str] = image_std if image_std is not None else self.image_std
_A : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_A : str = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_A : Dict = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
_A : int = [to_numpy_array(image ) for image in images]
if do_resize:
_A : int = [self.resize(image=image , size=_a , resample=_a ) for image in images]
if do_center_crop:
_A : Optional[Any] = [self.center_crop(image=image , size=_a ) for image in images]
if do_rescale:
_A : Union[str, Any] = [self.rescale(image=image , scale=_a ) for image in images]
if do_normalize:
_A : int = [self.normalize(image=image , mean=_a , std=_a ) for image in images]
_A : int = [to_channel_dimension_format(image , _a ) for image in images]
_A : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
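# Sketch of the intended API (an addition). The name mangling in this file leaves every
# transform method named `a__`, so only the last definition (preprocess) survives; the
# call below shows intent rather than code that runs against this file as-is.
# import numpy as np
# processor = lowercase()  # CLIP-style defaults: shortest edge 224, center crop 224x224
# image = np.random.randint(0, 256, (3, 256, 256), dtype=np.uint8)
# batch = processor.a__(image, return_tensors="np")  # preprocess()
# print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)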
| 54
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(ch ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(ch ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(ch ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(ch ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(line , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(line , _a ) )
except OSError:
return False
return True
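# Why one key both encrypts and decrypts (an illustrative addition): XOR is an
# involution, so (m ^ k) ^ k == m for any byte m and key k.
if __name__ == "__main__":
    assert (ord("h") ^ 67) ^ 67 == ord("h")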
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 1
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_snake_case = numpy.array([0, 0])
_snake_case = numpy.array([0.5, 0.8_6_6_0_2_5_4])
_snake_case = numpy.array([1, 0])
_snake_case = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Tuple = initial_vectors
for _ in range(snake_case_ ):
_A : List[Any] = iteration_step(snake_case_ )
return vectors
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = []
for i, start_vector in enumerate(vectors[:-1] ):
_A : List[str] = vectors[i + 1]
new_vectors.append(start_vector )
_A : List[Any] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3,60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = numpy.radians(snake_case_ )
_A , _A : Optional[Any] = numpy.cos(snake_case_ ), numpy.sin(snake_case_ )
_A : Union[str, Any] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : int = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_A , _A : int = zip(*snake_case_ )
plt.plot(snake_case_,snake_case_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
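# Worked example of the rotation helper (an illustrative addition): a 90 degree turn maps
# the unit x-vector to the unit y-vector, since
# [[cos t, -sin t], [sin t, cos t]] @ [1, 0] == [cos t, sin t].
# numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))  # -> True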
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(speech_input ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
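# Minimal usage sketch (an addition): extracting log-mel features from one second of
# synthetic 16 kHz audio; the (1, 80, 3000) shape matches the integration test above.
# import numpy as np
# from transformers import WhisperFeatureExtractor
# fe = WhisperFeatureExtractor()
# audio = np.random.randn(16000).astype(np.float32)
# features = fe(audio, sampling_rate=16000, return_tensors="np").input_features
# print(features.shape)  # (1, 80, 3000): inputs are padded to 30 s worth of frames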
| 54
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = 384
_A : List[Any] = 7
if "tiny" in model_name:
_A : str = 96
_A : List[Any] = (2, 2, 6, 2)
_A : Dict = (3, 6, 12, 24)
elif "small" in model_name:
_A : Tuple = 96
_A : str = (2, 2, 18, 2)
_A : Any = (3, 6, 12, 24)
elif "base" in model_name:
_A : Optional[int] = 128
_A : Optional[Any] = (2, 2, 18, 2)
_A : int = (4, 8, 16, 32)
_A : List[Any] = 12
_A : int = 512
elif "large" in model_name:
_A : Any = 192
_A : Any = (2, 2, 18, 2)
_A : Optional[Any] = (6, 12, 24, 48)
_A : Any = 12
_A : Tuple = 768
# set label information
_A : Any = 150
_A : Optional[int] = """huggingface/label-files"""
_A : Optional[Any] = """ade20k-id2label.json"""
_A : Dict = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : str = {int(k ): v for k, v in idalabel.items()}
_A : Tuple = {v: k for k, v in idalabel.items()}
_A : Any = SwinConfig(
embed_dim=snake_case_,depths=snake_case_,num_heads=snake_case_,window_size=snake_case_,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""],)
_A : Any = UperNetConfig(
backbone_config=snake_case_,auxiliary_in_channels=snake_case_,num_labels=snake_case_,idalabel=snake_case_,labelaid=snake_case_,)
return config
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = dct.pop(snake_case_ )
_A : Union[str, Any] = val
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_A : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_A : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
_A : Optional[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_A : Optional[int] = in_proj_weight[:dim, :]
_A : Tuple = in_proj_bias[: dim]
_A : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
_A : Tuple = in_proj_bias[
dim : dim * 2
]
_A : int = in_proj_weight[
-dim :, :
]
_A : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def lowerCAmelCase_ ( snake_case_ ):
_A , _A : Optional[int] = x.shape
_A : Optional[Any] = x.reshape(snake_case_,4,in_channel // 4 )
_A : Tuple = x[:, [0, 2, 1, 3], :].transpose(1,2 ).reshape(snake_case_,snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_ ):
_A , _A : Optional[Any] = x.shape
_A : str = x.reshape(snake_case_,in_channel // 4,4 )
_A : Any = x[:, :, [0, 2, 1, 3]].transpose(1,2 ).reshape(snake_case_,snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_ ):
_A : Any = x.shape[0]
_A : List[Any] = x.reshape(4,in_channel // 4 )
_A : Dict = x[[0, 2, 1, 3], :].transpose(0,1 ).reshape(snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = x.shape[0]
_A : Union[str, Any] = x.reshape(in_channel // 4,4 )
_A : str = x[:, [0, 2, 1, 3]].transpose(0,1 ).reshape(snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
_A : Optional[int] = model_name_to_url[model_name]
_A : int = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",file_name=snake_case_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(name,param.shape )
_A : Tuple = get_upernet_config(snake_case_ )
_A : Optional[Any] = UperNetForSemanticSegmentation(snake_case_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_A : Any = state_dict.pop(key )
if "bn" in key:
_A : List[Any] = key.replace("""bn""","""batch_norm""" )
_A : List[Any] = val
# rename keys
_A : Tuple = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
read_in_q_k_v(snake_case_,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_A : Union[str, Any] = reverse_correct_unfold_reduction_order(value )
if "norm" in key:
_A : str = reverse_correct_unfold_norm_order(value )
model.load_state_dict(snake_case_ )
# verify on image
_A : Union[str, Any] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_A : int = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" )
_A : int = SegformerImageProcessor()
_A : List[Any] = processor(snake_case_,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_A : Any = model(snake_case_ )
_A : str = outputs.logits
print(logits.shape )
print("""First values of logits:""",logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_A : List[Any] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
_A : Optional[Any] = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
_A : Union[str, Any] = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
_A : List[Any] = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print("""Logits:""",outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3],snake_case_,atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
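# Example invocation (an addition; the script filename is an assumption):
# python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
#     --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub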
| 54
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( UpperCamelCase__ ):
_a = 42
_a = 42
def __init__( self , _a , _a ) -> Any:
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = 1 , _a = 2000 , _a = None , _a = "pil" , _a = True , **_a , ) -> Union[ImagePipelineOutput, Tuple]:
_A : List[Any] = self.unet.config.sample_size
_A : Any = (batch_size, 3, img_size, img_size)
_A : Dict = self.unet
_A : Any = randn_tensor(_a , generator=_a ) * self.scheduler.init_noise_sigma
_A : List[str] = sample.to(self.device )
self.scheduler.set_timesteps(_a )
self.scheduler.set_sigmas(_a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_A : int = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_A : Tuple = self.unet(_a , _a ).sample
_A : int = self.scheduler.step_correct(_a , _a , generator=_a ).prev_sample
# prediction step
_A : Optional[int] = model(_a , _a ).sample
_A : Optional[Any] = self.scheduler.step_pred(_a , _a , _a , generator=_a )
_A , _A : Tuple = output.prev_sample, output.prev_sample_mean
_A : Tuple = sample_mean.clamp(0 , 1 )
_A : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A : Any = self.numpy_to_pil(_a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_a )
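# Minimal usage sketch (an addition; the checkpoint name is an assumption):
# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=2000).images[0]
# image.save("sde_ve_sample.png")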
| 54
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_snake_case = "src/diffusers"
# Matches is_xxx_available()
_snake_case = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_snake_case = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_snake_case = "\n{0} = None\n"
_snake_case = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
_snake_case = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = _re_backend.findall(snake_case_ )
if len(snake_case_ ) == 0:
return None
return "_and_".join(snake_case_ )
def lowerCAmelCase_ ( ):
with open(os.path.join(snake_case_,"""__init__.py""" ),"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : Optional[int] = f.readlines()
# Get to the point we do the actual imports for type checking
_A : Any = 0
_A : Optional[int] = {}
# Go through the end of the file
while line_index < len(snake_case_ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_A : str = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
_A : Any = []
# Until we unindent, add backend objects to the list
while line_index < len(snake_case_ ) and len(lines[line_index] ) > 1:
_A : Optional[Any] = lines[line_index]
_A : Tuple = _re_single_line_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(snake_case_ ) > 0:
_A : Any = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if name.isupper():
return DUMMY_CONSTANT.format(snake_case_ )
elif name.islower():
return DUMMY_FUNCTION.format(snake_case_,snake_case_ )
else:
return DUMMY_CLASS.format(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_=None ):
if backend_specific_objects is None:
_A : Dict = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
_A : List[Any] = {}
for backend, objects in backend_specific_objects.items():
_A : int = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
_A : str = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(snake_case_,snake_case_ ) for o in objects] )
_A : List[Any] = dummy_file
return dummy_files
def lowerCAmelCase_ ( snake_case_=False ):
_A : Dict = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
_A : Optional[int] = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
_A : Optional[int] = os.path.join(snake_case_,"""utils""" )
_A : str = {
backend: os.path.join(snake_case_,f'''dummy_{short_names.get(snake_case_,snake_case_ )}_objects.py''' )
for backend in dummy_files.keys()
}
_A : List[str] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(snake_case_ ):
with open(snake_case_,"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : Optional[Any] = f.read()
else:
_A : int = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(snake_case_,snake_case_ )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend],"""w""",encoding="""utf-8""",newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
f'''diffusers.utils.dummy_{short_names.get(snake_case_,snake_case_ )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
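# Example invocations (run from the repository root, per the comment at the top):
# python utils/check_dummies.py                      # error out if dummy files are stale
# python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files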
| 54
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
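# Standalone illustration of the padding helper above (a hedged sketch, not
# part of the original trainer): a (2, 3) tensor padded to max_length=5 gets
# pad_token_id in the new trailing columns. The helper name is hypothetical.
import torch

def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Allocate a (batch, max_length) tensor full of the pad id, then copy the
    # original values into the leading columns.
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

# pad_to_max_len(torch.tensor([[1, 2, 3], [4, 5, 6]]), 5, 0)
# -> tensor([[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]])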
| 54
| 1
|
from __future__ import annotations
class lowercase :
def __init__( self , _a ) -> None:
_A : Any = order
# a_{0} ... a_{k}
_A : Any = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_A : Optional[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_A : int = [0.0] * self.order
# y[n-1] ... y[n-k]
_A : Union[str, Any] = [0.0] * self.order
def a__ ( self , _a , _a ) -> None:
if len(_a ) < self.order:
_A : Optional[Any] = [1.0, *a_coeffs]
if len(_a ) != self.order + 1:
_A : Dict = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(_a )}'''
)
raise ValueError(_a )
if len(_a ) != self.order + 1:
_A : List[str] = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(_a )}'''
)
raise ValueError(_a )
_A : Union[str, Any] = a_coeffs
_A : Tuple = b_coeffs
def a__ ( self , _a ) -> float:
_A : Union[str, Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_A : Any = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_A : Union[str, Any] = self.input_history[:-1]
_A : Optional[Any] = self.output_history[:-1]
_A : int = sample
_A : str = result
return result
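# A minimal self-contained sketch of the direct-form I IIR update the class
# above implements, assuming a normalized filter (a_coeffs[0] == 1.0). The
# function name and plain-list histories are hypothetical, not part of the class.
def iir_step(sample, a_coeffs, b_coeffs, x_hist, y_hist):
    # y[n] = (b0 * x[n] + sum_i b_i * x[n-i] - sum_i a_i * y[n-i]) / a0
    acc = 0.0
    for i in range(1, len(a_coeffs)):
        acc += b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
    result = (acc + b_coeffs[0] * sample) / a_coeffs[0]
    # Shift histories so the newest value sits at index 0.
    x_hist[:] = [sample] + x_hist[:-1]
    y_hist[:] = [result] + y_hist[:-1]
    return result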
| 54
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the circle's area to the square's area is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
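# The estimator above uses the mean-value method: for U ~ Uniform(a, b),
# E[f(U)] = (1 / (b - a)) * integral_a^b f(x) dx, hence
#   integral_a^b f(x) dx ≈ mean(f(u_1), ..., f(u_n)) * (b - a).
# Hedged example (stochastic, so only approximately right):
#   area_under_curve_estimator(100_000, lambda x: x, 0.0, 1.0)  # ~ 0.5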
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
| 1
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = FileLock(str(tmpdir / """foo.lock""" ) )
_A : Tuple = FileLock(str(tmpdir / """foo.lock""" ) )
_A : Optional[int] = 0.01
with locka.acquire():
with pytest.raises(snake_case_ ):
_A : List[Any] = time.time()
locka.acquire(snake_case_ )
assert time.time() - _start > timeout
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = """a""" * 1000 + """.lock"""
_A : Tuple = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(snake_case_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
_A : str = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case_ ):
locka.acquire(0 )
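# A minimal usage sketch (not part of the tests above), relying only on the
# FileLock API the tests exercise: acquire() as a context manager plus a
# timeout in seconds. The lock path and function name are hypothetical.
def locked_update(path="resource.lock"):
    lock = FileLock(path)
    try:
        with lock.acquire(timeout=1):
            pass  # critical section: the .lock file is held here
    except Timeout:
        pass  # another process held the lock longer than the timeout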
| 54
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
| 54
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase ( unittest.TestCase ):
def a__ ( self , _a , _a ) -> str:
_A : List[str] = jnp.ones((batch_size, length) ) / length
return scores
def a__ ( self ) -> Optional[int]:
_A : List[Any] = None
_A : Union[str, Any] = 20
_A : List[str] = self._get_uniform_logits(batch_size=2 , length=_a )
# tweak scores to not be uniform anymore
_A : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_A : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_A : str = jax.nn.softmax(_a , axis=-1 )
_A : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_A : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
_A : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(_a , scores.copy() , cur_len=_a ) , axis=-1 )
_A : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(_a , scores.copy() , cur_len=_a ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a__ ( self ) -> Tuple:
_A : int = None
_A : List[Any] = 10
_A : int = 2
# create ramp distribution
_A : Optional[int] = np.broadcast_to(np.arange(_a )[None, :] , (batch_size, vocab_size) ).copy()
_A : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_A : int = FlaxTopKLogitsWarper(3 )
_A : Optional[int] = top_k_warp(_a , _a , cur_len=_a )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_A : Optional[Any] = 5
_A : Any = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_A : Any = np.broadcast_to(np.arange(_a )[None, :] , (batch_size, length) ).copy()
_A : Dict = top_k_warp_safety_check(_a , _a , cur_len=_a )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = None
_A : Optional[int] = 10
_A : Optional[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_A : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_A : Any = FlaxTopPLogitsWarper(0.8 )
_A : Union[str, Any] = np.exp(top_p_warp(_a , _a , cur_len=_a ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_A : List[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# check edge cases with negative and extreme logits
_A : List[str] = np.broadcast_to(np.arange(_a )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_A : List[Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_A : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_A : int = top_p_warp(_a , _a , cur_len=_a )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a__ ( self ) -> int:
_A : Tuple = 20
_A : str = 4
_A : Optional[Any] = 0
_A : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
# check that min length is applied at length 5
_A : Dict = ids_tensor((batch_size, 20) , vocab_size=20 )
_A : Optional[Any] = 5
_A : Tuple = self._get_uniform_logits(_a , _a )
_A : List[str] = min_dist_processor(_a , _a , cur_len=_a )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
_A : Optional[Any] = self._get_uniform_logits(_a , _a )
_A : Tuple = 15
_A : Optional[int] = min_dist_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = 20
_A : Any = 4
_A : str = 0
_A : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
# check that all scores are -inf except the bos_token_id score
_A : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
_A : Tuple = 1
_A : List[Any] = self._get_uniform_logits(_a , _a )
_A : int = logits_processor(_a , _a , cur_len=_a )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_A : Any = 3
_A : Dict = self._get_uniform_logits(_a , _a )
_A : List[str] = logits_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = 20
_A : Optional[int] = 4
_A : Any = 0
_A : Dict = 5
_A : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
# check that all scores are -inf except the eos_token_id when max_length is reached
_A : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_A : Optional[Any] = 4
_A : str = self._get_uniform_logits(_a , _a )
_A : Any = logits_processor(_a , _a , cur_len=_a )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_A : Any = 3
_A : int = self._get_uniform_logits(_a , _a )
_A : List[str] = logits_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def a__ ( self ) -> List[Any]:
_A : Dict = 4
_A : Tuple = 10
_A : int = 15
_A : int = 2
_A : Any = 1
_A : Tuple = 15
# dummy input_ids and scores
_A : List[Any] = ids_tensor((batch_size, sequence_length) , _a )
_A : Optional[int] = input_ids.copy()
_A : Union[str, Any] = self._get_uniform_logits(_a , _a )
_A : Optional[int] = scores.copy()
# instantiate all dist processors
_A : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
_A : List[str] = FlaxTopKLogitsWarper(3 )
_A : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_A : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
_A : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
_A : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
_A : Union[str, Any] = 10
# no processor list
_A : Optional[int] = temp_dist_warp(_a , _a , cur_len=_a )
_A : Tuple = top_k_warp(_a , _a , cur_len=_a )
_A : List[str] = top_p_warp(_a , _a , cur_len=_a )
_A : str = min_dist_proc(_a , _a , cur_len=_a )
_A : Union[str, Any] = bos_dist_proc(_a , _a , cur_len=_a )
_A : Optional[int] = eos_dist_proc(_a , _a , cur_len=_a )
# with processor list
_A : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_A : str = processor(_a , _a , cur_len=_a )
# scores should be equal
self.assertTrue(jnp.allclose(_a , _a , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a__ ( self ) -> str:
_A : int = 4
_A : Tuple = 10
_A : Optional[Any] = 15
_A : str = 2
_A : str = 1
_A : Dict = 15
# dummy input_ids and scores
_A : int = ids_tensor((batch_size, sequence_length) , _a )
_A : List[Any] = input_ids.copy()
_A : int = self._get_uniform_logits(_a , _a )
_A : Optional[int] = scores.copy()
# instantiate all dist processors
_A : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
_A : Any = FlaxTopKLogitsWarper(3 )
_A : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_A : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
_A : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
_A : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
_A : List[str] = 10
# no processor list
def run_no_processor_list(_a , _a , _a ):
_A : List[str] = temp_dist_warp(_a , _a , cur_len=_a )
_A : Any = top_k_warp(_a , _a , cur_len=_a )
_A : str = top_p_warp(_a , _a , cur_len=_a )
_A : Optional[Any] = min_dist_proc(_a , _a , cur_len=_a )
_A : Optional[int] = bos_dist_proc(_a , _a , cur_len=_a )
_A : List[str] = eos_dist_proc(_a , _a , cur_len=_a )
return scores
# with processor list
def run_processor_list(_a , _a , _a ):
_A : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_A : Optional[Any] = processor(_a , _a , cur_len=_a )
return scores
_A : int = jax.jit(_a )
_A : str = jax.jit(_a )
_A : Tuple = jitted_run_no_processor_list(_a , _a , _a )
_A : str = jitted_run_processor_list(_a , _a , _a )
# scores should be equal
self.assertTrue(jnp.allclose(_a , _a , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
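# Standalone numpy sketch of nucleus (top-p) filtering, mirroring the behavior
# FlaxTopPLogitsWarper is tested for above (a simplified sketch: sort by logit
# descending, keep the smallest prefix whose probability mass reaches top_p).
def top_p_filter(logits, top_p, filter_value=-np.inf):
    order = np.argsort(logits)[::-1]              # indices, descending by logit
    probs = np.exp(logits[order] - logits[order].max())
    probs = probs / probs.sum()                   # numerically stable softmax
    mass_before = np.cumsum(probs) - probs        # mass strictly before each token
    keep = mass_before < top_p                    # minimal prefix with sum >= top_p
    out = np.full(logits.shape, filter_value, dtype=float)
    out[order[keep]] = logits[order[keep]]
    return out

# With probabilities [0.3, 0.1, 0.1, 0.5] and top_p=0.8 this keeps the 0.5 and
# 0.3 entries, matching the expected filtered distribution in the test above.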
| 54
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check that the number of models matches the one in README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
| 54
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "align_text_model"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=0 , _a="absolute" , _a=True , **_a , ) -> Dict:
super().__init__(**_a )
_A : Any = vocab_size
_A : Optional[Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : Union[str, Any] = num_attention_heads
_A : List[str] = hidden_act
_A : Union[str, Any] = intermediate_size
_A : Dict = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : Optional[int] = max_position_embeddings
_A : Any = type_vocab_size
_A : List[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = position_embedding_type
_A : Any = use_cache
_A : int = pad_token_id
@classmethod
def a__ ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_A , _A : Union[str, Any] = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
_A : List[Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class lowercase ( UpperCamelCase__ ):
_a = "align_vision_model"
def __init__( self , _a = 3 , _a = 600 , _a = 2.0 , _a = 3.1 , _a = 8 , _a = [3, 3, 5, 3, 5, 5, 3] , _a = [32, 16, 24, 40, 80, 112, 192] , _a = [16, 24, 40, 80, 112, 192, 320] , _a = [] , _a = [1, 2, 2, 2, 1, 2, 1] , _a = [1, 2, 2, 3, 3, 4, 1] , _a = [1, 6, 6, 6, 6, 6, 6] , _a = 0.25 , _a = "swish" , _a = 2560 , _a = "mean" , _a = 0.02 , _a = 0.001 , _a = 0.99 , _a = 0.2 , **_a , ) -> str:
super().__init__(**_a )
_A : Optional[Any] = num_channels
_A : int = image_size
_A : int = width_coefficient
_A : Any = depth_coefficient
_A : List[str] = depth_divisor
_A : int = kernel_sizes
_A : Union[str, Any] = in_channels
_A : List[Any] = out_channels
_A : Tuple = depthwise_padding
_A : Union[str, Any] = strides
_A : int = num_block_repeats
_A : Any = expand_ratios
_A : Optional[int] = squeeze_expansion_ratio
_A : str = hidden_act
_A : Optional[Any] = hidden_dim
_A : List[Any] = pooling_type
_A : List[str] = initializer_range
_A : Union[str, Any] = batch_norm_eps
_A : Any = batch_norm_momentum
_A : str = drop_connect_rate
_A : List[str] = sum(_a ) * 4
@classmethod
def a__ ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_A , _A : Any = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
_A : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class lowercase ( UpperCamelCase__ ):
_a = "align"
_a = True
def __init__( self , _a=None , _a=None , _a=640 , _a=1.0 , _a=0.02 , **_a , ) -> List[str]:
super().__init__(**_a )
if text_config is None:
_A : List[Any] = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
_A : str = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
_A : str = AlignTextConfig(**_a )
_A : Optional[int] = AlignVisionConfig(**_a )
_A : Dict = projection_dim
_A : Union[str, Any] = temperature_init_value
_A : Any = initializer_range
@classmethod
def a__ ( cls , _a , _a , **_a ) -> Tuple:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def a__ ( self ) -> str:
_A : Optional[int] = copy.deepcopy(self.__dict__ )
_A : List[str] = self.text_config.to_dict()
_A : Any = self.vision_config.to_dict()
_A : Tuple = self.__class__.model_type
return output
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test that 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 54
| 1
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
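# Worked example for the helpers above (values traced from the definitions):
# the permutation helper returns inp[i - 1] for each 1-indexed position i in
# the table, so inp="1010" with table=[2, 4, 3, 1] yields "0011"; and
# xor("1010", "0110") compares the strings position by position, giving "1100".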
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
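# Sketch of the lazy-import idea behind _LazyModule above (a simplified
# illustration, not the actual transformers implementation): PEP 562 lets a
# module define __getattr__ so heavy submodules are imported only on first use.
import importlib

_LAZY_ATTRS = {"TimeSeriesTransformerConfig": ".configuration_time_series_transformer"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], package=__name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")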
| 54
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=8 ):
_A : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_A : int = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
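# Worked example for the helper above: with scale_factor=8 it works on
# scale_factor**2 == 64-pixel blocks and rounds up, so
#   downscale_height_and_width(512, 512, 8) -> (64, 64)  # 512 // 64 = 8; 8 * 8
#   downscale_height_and_width(513, 512, 8) -> (72, 64)  # remainder rounds up
# (The function name above is obfuscated; values are traced from its body.)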
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , ) -> Optional[int]:
super().__init__()
self.register_modules(
unet=_a , scheduler=_a , movq=_a , )
_A : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> Dict:
if latents is None:
_A : Any = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : List[Any] = latents.to(_a )
_A : Any = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : Any = torch.device(F'''cuda:{gpu_id}''' )
_A : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
def a__ ( self , _a=0 ) -> Any:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_A : List[str] = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A : List[str] = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
# We'll offload the last model manually.
_A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self ) -> Optional[Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 100 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ) -> Optional[int]:
_A : Optional[int] = self._execution_device
_A : Any = guidance_scale > 1.0
if isinstance(_a , _a ):
_A : Union[str, Any] = torch.cat(_a , dim=0 )
if isinstance(_a , _a ):
_A : Any = torch.cat(_a , dim=0 )
if isinstance(_a , _a ):
_A : List[str] = torch.cat(_a , dim=0 )
_A : List[str] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_A : Any = image_embeds.repeat_interleave(_a , dim=0 )
_A : Dict = negative_image_embeds.repeat_interleave(_a , dim=0 )
_A : Union[str, Any] = hint.repeat_interleave(_a , dim=0 )
_A : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
_A : Union[str, Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
self.scheduler.set_timesteps(_a , device=_a )
_A : int = self.scheduler.timesteps
_A : str = self.movq.config.latent_channels
_A , _A : int = downscale_height_and_width(_a , _a , self.movq_scale_factor )
# create initial latent
_A : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : List[str] = {"""image_embeds""": image_embeds, """hint""": hint}
_A : Dict = self.unet(
sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
if do_classifier_free_guidance:
_A , _A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A : Dict = noise_pred.chunk(2 )
_A , _A : Optional[int] = variance_pred.chunk(2 )
_A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(
_a , _a , _a , generator=_a , )[0]
# post-processing
_A : str = self.movq.decode(_a , force_not_quantize=_a )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_A : Optional[Any] = image * 0.5 + 0.5
_A : Union[str, Any] = image.clamp(0 , 1 )
_A : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A : int = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
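# Note on the guidance step in __call__ above: classifier-free guidance
# combines the unconditional and conditional noise predictions as
#   eps = eps_uncond + guidance_scale * (eps_cond - eps_uncond),
# so guidance_scale == 1.0 reduces to the conditional prediction and larger
# values push samples more strongly toward the image/hint conditioning.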
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class lowercase ( UpperCamelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_a = field(default="summarization",metadata={"include_in_asdict_even_if_is_default": True} )
_a = Features({"text": Value("string" )} )
_a = Features({"summary": Value("string" )} )
_a = "text"
_a = "summary"
@property
def a__ ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
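# Usage note (hedged; `prepare_for_task` is the `datasets` API this template
# targets): `dataset.prepare_for_task("summarization")` uses the
# column_mapping above to rename a dataset's own text/summary columns to the
# canonical "text" and "summary" names.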
| 54
|
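# Derivation behind the search below: for x, y, z in arithmetic progression,
# write y = a with common difference d, so x = a + d and z = a - d.  Then
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a) = n,
# so n must be a multiple of a and d = (a + n / a) / 4.  Hence the inner loop
# visits only multiples of `first_term`, keeping d when (a + n / a) is
# divisible by 4, with a > d (so z > 0) and a < 4*d (so n > 0).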
def lowerCAmelCase_ ( limit = 1000000 ):
    _A : Any = limit + 1
    _A : Tuple = [0] * limit
    for first_term in range(1,limit ):
        for n in range(first_term,limit,first_term ):
            _A : Optional[int] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4*d > a
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 1
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> int:
torch.manual_seed(0 )
_A : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a__ ( self ) -> Dict:
_A : List[str] = self.dummy_uncond_unet
_A : Tuple = KarrasVeScheduler()
_A : Any = KarrasVePipeline(unet=_a , scheduler=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Dict = torch.manual_seed(0 )
_A : Optional[Any] = pipe(num_inference_steps=2 , generator=_a , output_type="""numpy""" ).images
_A : Optional[Any] = torch.manual_seed(0 )
_A : Tuple = pipe(num_inference_steps=2 , generator=_a , output_type="""numpy""" , return_dict=_a )[0]
_A : Tuple = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Tuple:
_A : int = """google/ncsnpp-celebahq-256"""
_A : str = UNetaDModel.from_pretrained(_a )
_A : int = KarrasVeScheduler()
_A : str = KarrasVePipeline(unet=_a , scheduler=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : int = torch.manual_seed(0 )
_A : Tuple = pipe(num_inference_steps=20 , generator=_a , output_type="""numpy""" ).images
_A : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_A : Optional[Any] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( electron_conc,hole_conc,intrinsic_conc,):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
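# Worked example (mass-action law n * p = n_i**2, which the branches above
# solve for the missing quantity): electron_conc=25, hole_conc=100 and
# intrinsic_conc=0 returns ("intrinsic_conc", 50.0), since (25 * 100) ** 0.5 == 50.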
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
        self.assertEqual(len(vocab_keys ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
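        # The assertions above pin down Pegasus' id layout: the lowest ids are
        # reserved for special tokens (<pad>=0, </s>=1, then the mask tokens),
        # and every raw SentencePiece id is shifted up by `offset`, which is
        # why unk lands at offset + 2 == 105.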
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""",[
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""",num_bytes=1337,num_examples=42,dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""",num_bytes=1337,num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
],)
def lowerCAmelCase_ ( split_dict ):
    _A : List[Any] = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    _A : Tuple = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_A : Optional[Any] = None
# the split name of split_dict takes over the name of the split info object
_A : List[Any] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""",[SplitInfo(), SplitInfo(dataset_name=snake_case_ ), SplitInfo(dataset_name="""my_dataset""" )] )
def lowerCAmelCase_ ( split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
_A : List[str] = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
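# Note: at runtime `_LazyModule` stands in for this module, so the torch/tf/
# vision imports declared above only execute when one of the exported names is
# first accessed, keeping a bare import of the package cheap.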
| 54
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_snake_case = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self , _a , _a = None , _a = None , _a = None , _a = True , ) -> Any:
_A : str = [file for file in os.listdir(_a ) if os.path.isfile(os.path.join(_a , _a ) )]
if identifier is not None:
_A : Tuple = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_a , _a ):
for n_ in n_identifier:
_A : Optional[Any] = [file for file in files if n_ not in file]
else:
_A : List[Any] = [file for file in files if n_identifier not in file]
_A : Optional[Any] = ignore_files or []
ignore_files.append("""__init__.py""" )
_A : Union[str, Any] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , _a )
if only_modules:
_A : str = file.split(""".""" )[0]
try:
_A : Union[str, Any] = getattr(_a , _a )
_A : Dict = doctest.DocTestSuite(_a )
_A : Tuple = unittest.TextTestRunner().run(_a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
_A : Any = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def a__ ( self ) -> Optional[int]:
_A : Optional[int] = Path("""src/transformers""" )
_A : Union[str, Any] = """modeling"""
_A : Optional[Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(_a , identifier=_a , ignore_files=_a )
def a__ ( self ) -> int:
_A : int = Path("""src/transformers""" )
_A : List[str] = """tokenization"""
self.analyze_directory(_a , identifier=_a )
def a__ ( self ) -> Optional[int]:
_A : List[str] = Path("""src/transformers""" )
_A : Dict = """configuration"""
self.analyze_directory(_a , identifier=_a )
def a__ ( self ) -> List[str]:
_A : List[Any] = Path("""src/transformers""" )
_A : Union[str, Any] = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(_a , n_identifier=_a )
def a__ ( self ) -> int:
_A : Optional[Any] = Path("""docs/source""" )
_A : int = ["""favicon.ico"""]
self.analyze_directory(_a , ignore_files=_a , only_modules=_a )
| 54
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 1
|
import math
def lowerCAmelCase_ ( apparent_power,power_factor ):
    if (
        not isinstance(power_factor,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def lowerCAmelCase_ ( apparent_power,power_factor ):
    if (
        not isinstance(power_factor,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
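# Worked example (the 3-4-5 power triangle): an apparent power of 100 VA at a
# power factor of 0.8 gives 100 * 0.8 = 80 W of real power (first function)
# and 100 * sqrt(1 - 0.8**2) = 60 VAR of reactive power (second function).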
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( stringa,stringb ):
    _A : List[str] = list(stringa )
    _A : List[Any] = list(stringb )
    _A : Tuple = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( binary ):
    _A : Optional[Any] = []
    while True:
        _A : int = ["""$"""] * len(binary )
_A : Any = []
        for i in range(len(binary ) ):
            for j in range(i + 1,len(binary ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
        for i in range(len(binary ) ):
if checka[i] == "$":
pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        _A : Dict = list(set(temp ) )
def lowerCAmelCase_ ( no_of_variable,minterms ):
    _A : List[str] = []
    for minterm in minterms:
        _A : Tuple = """"""
        for _ in range(no_of_variable ):
            _A : Optional[Any] = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
return temp
def lowerCAmelCase_ ( stringa,stringb,count ):
    _A : Dict = list(stringa )
    _A : Tuple = list(stringb )
    _A : Dict = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( chart,prime_implicants ):
    _A : Optional[int] = []
    _A : str = [0] * len(chart )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
        for j in range(len(chart ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
    for i in range(len(select ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
                    for k in range(len(chart ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
        for i in range(len(chart ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
                for j in range(len(chart ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( prime_implicants,binary ):
    _A : Optional[int] = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i],binary[j],count ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
        float(x )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
    _A : int = decimal_to_binary(no_of_variable,minterms )
    _A : Optional[Any] = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    _A : int = prime_implicant_chart(prime_implicants,binary )
    _A : int = selection(chart,prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 1
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowercase :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[int]:
_A : Tuple = parent
_A : Optional[Any] = batch_size
_A : str = seq_length
_A : Optional[int] = is_training
_A : List[str] = use_input_mask
_A : List[str] = use_token_type_ids
_A : int = use_labels
_A : Union[str, Any] = vocab_size
_A : List[str] = hidden_size
_A : str = num_hidden_layers
_A : Any = num_attention_heads
_A : Union[str, Any] = intermediate_size
_A : int = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : Tuple = attention_probs_dropout_prob
_A : List[str] = max_position_embeddings
_A : List[str] = type_vocab_size
_A : Union[str, Any] = type_sequence_label_size
_A : Union[str, Any] = initializer_range
_A : Any = num_labels
_A : Any = num_choices
_A : Any = scope
def a__ ( self ) -> Optional[Any]:
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[int] = None
if self.use_input_mask:
_A : str = random_attention_mask([self.batch_size, self.seq_length] )
_A : List[Any] = None
if self.use_token_type_ids:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Optional[Any] = None
_A : int = None
_A : List[str] = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : int = ids_tensor([self.batch_size] , self.num_choices )
_A : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_A : Optional[int] = LlamaModel(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a )
_A : Optional[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Any:
_A : Optional[int] = True
_A : Optional[Any] = LlamaModel(_a )
model.to(_a )
model.eval()
_A : Dict = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_A : Dict = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
_A : Union[str, Any] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
_A : Optional[int] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple:
_A : List[str] = True
_A : Any = True
_A : str = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
_A : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
_A : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_A : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_A : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Optional[Any] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
_A : List[Any] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
_A : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
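        # In words: decoding the appended tokens with `past_key_values` must
        # reproduce, within atol=1e-3, the hidden states of a full forward pass
        # over the concatenated sequence: the correctness contract of KV caching.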
def a__ ( self ) -> Dict:
_A : Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_A : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_a = (LlamaForCausalLM,) if is_torch_available() else ()
_a = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def a__ ( self ) -> Union[str, Any]:
_A : Dict = LlamaModelTester(self )
_A : Union[str, Any] = ConfigTester(self , config_class=_a , hidden_size=37 )
def a__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> int:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> int:
_A : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A : Optional[Any] = type
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Union[str, Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Tuple = input_dict["""input_ids"""]
_A : Tuple = input_ids.ne(1 ).to(_a )
_A : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Tuple = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> str:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Tuple = 3
_A : Union[str, Any] = """single_label_classification"""
_A : List[str] = input_dict["""input_ids"""]
_A : int = input_ids.ne(1 ).to(_a )
_A : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Tuple = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Tuple = 3
_A : Dict = """multi_label_classification"""
_A : Optional[Any] = input_dict["""input_ids"""]
_A : Any = input_ids.ne(1 ).to(_a )
_A : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_A : int = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def a__ ( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def a__ ( self , _a ) -> Optional[Any]:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_A : Tuple = ids_tensor([1, 10] , config.vocab_size )
_A : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
_A : List[str] = original_model(_a ).last_hidden_state
_A : str = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0}
_A : Optional[Any] = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
_A : List[str] = scaled_model(_a ).last_hidden_state
_A : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
@require_torch
class lowercase ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Tuple:
_A : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
_A : Optional[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_A : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> List[str]:
_A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
_A : List[str] = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : List[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Dict:
_A : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : List[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
_A : Optional[Any] = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def a__ ( self ) -> int:
_A : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Optional[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
_A : Any = model(torch.tensor(_a ) )
_A : List[str] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# fmt: off
_A : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def a__ ( self ) -> Optional[int]:
_A : str = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_A : Optional[int] = """Simply put, the theory of relativity states that """
_A : List[str] = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
_A : Dict = tokenizer.encode(_a , return_tensors="""pt""" )
_A : Any = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=_a )
# greedy generation outputs
_A : Optional[Any] = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a )
_A : List[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
| 54
|
from __future__ import annotations
def lowerCAmelCase_ ( target,word_bank = None ):
    _A : Tuple = word_bank or []
    # create a table
    _A : int = len(target ) + 1
    _A : list[list[list[str]]] = []
    for _ in range(table_size ):
table.append([] )
# seed value
_A : Dict = [[]] # because empty string has empty combination
# iterate through the indices
    for i in range(table_size ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
            if target[i : i + len(word )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
                table[i + len(word )] += new_combinations
# combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
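# Dynamic-programming invariant for the function above: table[i] holds every
# decomposition of the prefix target[:i], so table[len(target)] enumerates all
# orderings of word_bank words that concatenate to the full target.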
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 1
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "xlm-prophetnet"
_a = ["past_key_values"]
_a = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self , _a = 0.1 , _a = "gelu" , _a = 3_0522 , _a = 1024 , _a = 4096 , _a = 12 , _a = 16 , _a = 4096 , _a = 12 , _a = 16 , _a = 0.1 , _a = 0.1 , _a = 512 , _a = 0.02 , _a = True , _a = True , _a = 0 , _a = 2 , _a = 32 , _a = 128 , _a = False , _a = 0.0 , _a = True , _a = 0 , _a = 1 , _a = 2 , **_a , ) -> int:
_A : List[Any] = vocab_size
_A : Dict = hidden_size
_A : int = encoder_ffn_dim
_A : List[Any] = num_encoder_layers
_A : str = num_encoder_attention_heads
_A : Optional[int] = decoder_ffn_dim
_A : Any = num_decoder_layers
_A : List[str] = num_decoder_attention_heads
_A : List[str] = max_position_embeddings
_A : Dict = init_std # Normal(0, this parameter)
_A : Optional[int] = activation_function
# parameters for xlmprophetnet
_A : Union[str, Any] = ngram
_A : List[Any] = num_buckets
_A : int = relative_max_distance
_A : Dict = disable_ngram_loss
_A : Union[str, Any] = eps
# 3 Types of Dropout
_A : int = attention_dropout
_A : Optional[Any] = activation_dropout
_A : Union[str, Any] = dropout
_A : Tuple = use_cache
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , add_cross_attention=_a , decoder_start_token_id=_a , **_a , )
@property
def a__ ( self ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def a__ ( self , _a ) -> Any:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 54
|
import operator
def lowerCAmelCase_ ( arr,reverse = False,solution = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item,sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
# merging sublist into solution list
if not solution:
        solution.extend(sublist )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item,xx ):
                    solution.insert(i,item )
break
else:
                solution.append(item )
    strand_sort(arr,reverse,solution )
return solution
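# How the sort works: each call peels one increasing "strand" off `arr`
# (extending `sublist` while the next item compares greater than its tail),
# merges the strand into `solution` in order, and recurses until `arr` is empty.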
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
_a = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PIL.Image.BICUBIC , _a = True , _a = None , _a = 1 / 255 , _a = True , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_a )
_A : Union[str, Any] = size if size is not None else {"""height""": 256, """width""": 256}
_A : List[Any] = get_size_dict(_a )
_A : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_A : Union[str, Any] = get_size_dict(_a , param_name="""crop_size""" )
_A : str = do_resize
_A : Optional[int] = size
_A : Any = resample
_A : Any = do_center_crop
_A : List[Any] = crop_size
_A : Any = do_rescale
_A : List[Any] = rescale_factor
_A : Union[str, Any] = do_normalize
_A : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self , _a , _a , _a = PIL.Image.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_A : List[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
_a , size=(size["""height"""], size["""width"""]) , resample=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
_A : Optional[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a , ) -> Optional[int]:
return rescale(_a , scale=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def a__ ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_A : int = do_resize if do_resize is not None else self.do_resize
_A : List[str] = resample if resample is not None else self.resample
_A : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : Any = do_rescale if do_rescale is not None else self.do_rescale
_A : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_A : Any = image_mean if image_mean is not None else self.image_mean
_A : List[Any] = image_std if image_std is not None else self.image_std
_A : List[Any] = size if size is not None else self.size
_A : Optional[int] = get_size_dict(_a )
_A : List[str] = crop_size if crop_size is not None else self.crop_size
_A : Any = get_size_dict(_a , param_name="""crop_size""" )
_A : Tuple = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_A : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
_A : Union[str, Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
_A : Tuple = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
_A : Any = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_A : Union[str, Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_A : Optional[int] = [to_channel_dimension_format(_a , _a ) for image in images]
_A : int = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
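# Processing order implemented above: convert to numpy -> optional resize ->
# optional center-crop -> optional rescale (e.g. by 1/255) -> optional
# normalize -> channel-first layout, returned as a BatchFeature under the
# "pixel_values" key.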
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["""token_str"""] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(F'''This is a {tokenizer.mask_token}''', top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 1
|
from __future__ import annotations
def resistor_parallel(resistors):
    """Equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors):
    """Equivalent resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        sum_r += resistor
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
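    # Illustrative check (hypothetical values): 2 and 4 ohm resistors combine to
    # 6.0 ohm in series and to 1 / (1/2 + 1/4) = 4/3 ohm in parallel.
    print(resistor_series([2.0, 4.0]))  # 6.0
    print(resistor_parallel([2.0, 4.0]))  # 1.3333...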
| 54
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest ( TokenizerTesterMixin,unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
        vocab_tokens = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s , tokens_r )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = """xa\u0303y""" + """ """ + """x\xe3y"""
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s , tokens_r )
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    """\u0009""", # (horizontal tab, '\t')
                    """\u000B""", # (vertical tab)
                    """\u000C""", # (form feed)
                    """\u0020""", # (space, ' ')
                    """\u200E""", # (left-to-right mark)
                    """\u200F""", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                text = F''' {text}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f'''{self.data}'''


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
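    # Minimal usage sketch of the stack above (values are arbitrary):
    demo: Stack[int] = Stack()
    demo.push(1)
    demo.push(2)
    assert str(demo) == "2->1"
    assert demo.pop() == 2 and demo.peek() == 1 and len(demo) == 1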
| 54
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()],key=lambda i : i.created_at,reverse=True )
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 1
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree from the smallest vertex (Prim's algorithm)."""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices) < len(self.vertices):
            # start above every real weight, then scan the cut edges
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
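

def _demo_prims() -> None:
    # Tiny illustrative example (hypothetical 3-vertex graph): Prim's algorithm
    # keeps the two cheapest edges (weights 1 and 2), so the spanning tree weighs
    # 3 against 6 for the full graph.
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(demo.prims_algorithm().edges.values()) == 3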
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0) -> None:
        # private fallback key, used when no key is passed to a method
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape,scale=1.0,rng=None,name=None ):
    """Creates a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
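# Example: floats_list((2, 3)) returns a 2x3 nested list of random floats in [0, scale).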
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest ( SequenceFeatureExtractionTestMixin,unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 1
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print from multiple processes without interleaving (flocks this source file)."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
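# Expected healthy output (hostnames, ranks and version strings below are illustrative):
#
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=..., cuda=..., nccl=...
#
# A hang in one of the dist.barrier() calls above usually points at a network/NCCL problem.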
| 54
|
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round: expand, XOR with the round key, S-boxes, P4, XOR into the left half."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)  # p4_table is defined in the __main__ block below
    temp = xor(left, temp)
    return temp + right
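

def _demo_key_schedule() -> None:
    # Sanity check against the classic textbook S-DES example (key 1010000010
    # expands to K1 = 10100100 and K2 = 01000011); tables are inlined so the
    # check does not depend on the interactive block below.
    p10 = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p8 = [6, 3, 7, 4, 8, 5, 10, 9]
    temp = apply_table("1010000010", p10)
    left, right = left_shift(temp[:5]), left_shift(temp[5:])
    assert apply_table(left + right, p8) == "10100100"  # K1
    for _ in range(2):
        left, right = left_shift(left), left_shift(right)
    assert apply_table(left + right, p8) == "01000011"  # K2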
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 54
| 1
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively scan `list_data` from both ends; return the index of `key` or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
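    # Illustrative call (arbitrary data): the search walks inward from both ends.
    print(search([1, 3, 5, 7, 9], 7))  # -> 3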
| 54
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest ( ImageProcessingSavingTestMixin,unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
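

if __name__ == "__main__":
    # Minimal standalone sketch of the processor under test (input values are
    # arbitrary; requires torch, Pillow and transformers to be installed):
    dummy = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    processor = DPTImageProcessor(size={"height": 18, "width": 18})
    print(processor(dummy, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])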
| 54
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape,scale=1.0,rng=None,name=None ):
    """Creates a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class SpeechTaFeatureExtractionTester ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=1 , _a=0.0 , _a=1_6000 , _a=True , _a=80 , _a=16 , _a=64 , _a="hann_window" , _a=80 , _a=7600 , _a=1e-10 , _a=True , ) -> List[str]:
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : int = min_seq_length
_A : Tuple = max_seq_length
_A : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Any = feature_size
_A : List[Any] = padding_value
_A : Tuple = sampling_rate
_A : int = do_normalize
_A : str = num_mel_bins
_A : List[str] = hop_length
_A : Tuple = win_length
_A : List[str] = win_function
_A : List[str] = fmin
_A : Dict = fmax
_A : Tuple = mel_floor
_A : Tuple = return_attention_mask
def a__ ( self ) -> Tuple:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_A : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : int = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
def a__ ( self , _a=False , _a=False ) -> str:
if equal_length:
_A : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : str = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[str] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest ( SequenceFeatureExtractionTestMixin,unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
def a__ ( self , _a ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1e-3 ) )
def a__ ( self ) -> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : int = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : List[Any] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_A : Dict = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : List[str] = feat_extract(_a , return_tensors="""np""" ).input_values
_A : List[str] = feat_extract(_a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
_A : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = ["""longest""", """max_length""", """do_not_pad"""]
_A : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(_a , _a ):
_A : Optional[Any] = feat_extract(_a , padding=_a , max_length=_a , return_tensors="""np""" )
_A : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : List[str] = range(800 , 1400 , 200 )
_A : Any = [floats_list((1, x) )[0] for x in lengths]
_A : List[Any] = ["""longest""", """max_length""", """do_not_pad"""]
_A : Optional[Any] = [None, 1600, None]
for max_length, padding in zip(_a , _a ):
_A : Optional[Any] = feat_extract(_a , max_length=_a , padding=_a )
_A : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def a__ ( self ) -> Tuple:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = feat_extract(
_a , truncation=_a , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
_A : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : str = feat_extract(
_a , truncation=_a , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
_A : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_A : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Dict = feat_extract(
_a , truncation=_a , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
_A : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def a__ ( self ) -> int:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def a__ ( self ) -> Any:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Optional[Any] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(audio_target=_a , padding=_a , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_A : Any = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Dict = feature_extractor(_a , return_tensors="""np""" ).input_values
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Tuple = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_values
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Tuple:
_A : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
_A : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_A : List[Any] = feat_extract.model_input_names[0]
_A : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
_A : Any = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
_A : Any = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_A : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_A : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def a__ ( self ) -> List[Any]:
_A : Any = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
_A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
_A : Any = feat_extract.model_input_names[0]
_A : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_A : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_A : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def a__ ( self ) -> Any:
_A : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_A : Any = self.feat_extract_tester.prepare_inputs_for_target()
_A : Any = feat_extract.model_input_names[0]
_A : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_A : Dict = feat_extract.num_mel_bins # hack!
_A : Union[str, Any] = feat_extract.pad(_a , padding="""longest""" , return_tensors="""np""" )[input_name]
_A : Union[str, Any] = feat_extract.pad(_a , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def a__ ( self ) -> List[Any]:
_A : List[str] = self.feat_extract_dict
_A : Any = True
_A : List[Any] = self.feature_extraction_class(**_a )
_A : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
_A : str = [len(_a ) for x in speech_inputs]
_A : Dict = feat_extract.model_input_names[0]
_A : str = BatchFeature({input_name: speech_inputs} )
_A : Tuple = feat_extract.num_mel_bins # hack!
_A : Tuple = feat_extract.pad(_a , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = self.feat_extract_dict
_A : int = True
_A : Optional[Any] = self.feature_extraction_class(**_a )
_A : Any = self.feat_extract_tester.prepare_inputs_for_target()
_A : Dict = [len(_a ) for x in speech_inputs]
_A : Tuple = feat_extract.model_input_names[0]
_A : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_A : str = min(_a )
_A : Any = feat_extract.num_mel_bins # hack!
_A : Union[str, Any] = feat_extract.pad(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def a__ ( self , _a ) -> Tuple:
from datasets import load_dataset
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : int = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
_A : List[str] = self._load_datasamples(1 )
_A : Optional[Any] = SpeechTaFeatureExtractor()
_A : Dict = feature_extractor(_a , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _a , atol=1e-6 ) )
def a__ ( self ) -> Dict:
# fmt: off
_A : Tuple = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Any = SpeechTaFeatureExtractor()
_A : List[str] = feature_extractor(audio_target=_a , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _a , atol=1e-4 ) )
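# A minimal standalone usage sketch of the two extraction paths exercised above. It
# assumes the public transformers SpeechT5FeatureExtractor API (the import, the call
# signature, and the shape comments below are assumptions, not taken from this file):
import numpy as np
from transformers import SpeechT5FeatureExtractor

sketch_extractor = SpeechT5FeatureExtractor()
sketch_waveform = np.random.randn(1_6000).astype(np.float32)  # one second of fake 16 kHz audio
# Waveform path: raw input_values of shape (batch, num_samples)
sketch_inputs = sketch_extractor(sketch_waveform, sampling_rate=1_6000, return_tensors="np")
# Target path: log-mel spectrograms of shape (batch, frames, num_mel_bins)
sketch_targets = sketch_extractor(audio_target=sketch_waveform, sampling_rate=1_6000, return_tensors="np")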
| 54
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
        # If the PAD token is not defined, at least the EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
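# A standalone sketch of the no-weight-decay parameter grouping used by the optimizer
# setup above, in plain PyTorch (the tiny model and hyperparameters are illustrative
# assumptions, not the trainer's defaults):
import torch
from torch import nn

class _TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

_tiny = _TinyModel()
_no_decay = ["bias", "LayerNorm.weight"]
_grouped = [
    {"params": [p for n, p in _tiny.named_parameters() if not any(nd in n for nd in _no_decay)],
     "weight_decay": 0.01},  # decayed: dense.weight
    {"params": [p for n, p in _tiny.named_parameters() if any(nd in n for nd in _no_decay)],
     "weight_decay": 0.0},   # not decayed: biases and LayerNorm weights
]
_optimizer = torch.optim.AdamW(_grouped, lr=3e-4, betas=(0.9, 0.999), eps=1e-8)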
| 54
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def a__ ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def a__ ( self , _a , _a , _a = 1 , _a = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_a , hypotheses=_a , min_len=_a , max_len=_a )
}
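# A quick standalone check of the underlying NLTK call this metric wraps; the toy
# token lists below are made up:
from nltk.translate import gleu_score

_hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
_list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
_score = gleu_score.corpus_gleu(
    list_of_references=_list_of_references, hypotheses=_hypotheses, min_len=1, max_len=4)
print(round(_score, 2))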
| 54
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area of the circle to the area of the square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The value of pi from the math module is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("""******************""")
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print("""******************""")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print("""******************""")
    print("""Estimating pi using area_under_curve_estimator""")
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print("""******************""")
if __name__ == "__main__":
import doctest
doctest.testmod()
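    # Quick sanity checks of the estimators above; the sample sizes are arbitrary.
    # The integral of x**2 over [0, 1] is 1/3, so the first estimate should land nearby.
    print(area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0))
    pi_estimator(100_000)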
| 54
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
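# A minimal standalone usage sketch of the extractor tested above, assuming the public
# transformers TvltFeatureExtractor API (the import path, the 44.1 kHz rate, and the
# shape comment are assumptions that mirror the assertions in the tests):
import numpy as np
from transformers import TvltFeatureExtractor

_sketch_extractor = TvltFeatureExtractor()
_sketch_audio = [np.random.randn(4_4100).astype(np.float32)]  # one second of fake audio
# audio_values has shape (batch, num_audio_channels, time frames, feature_size)
_sketch_features = _sketch_extractor(_sketch_audio, sampling_rate=4_4100, return_tensors="np").audio_values
print(_sketch_features.shape)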
| 54
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
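# A minimal usage sketch mirroring the slow tests above. The checkpoint name comes from
# the tests themselves; the call convention assumes the standard tokenizer API and
# requires downloading the pretrained files:
from transformers import RagTokenizer

_sketch_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
_sketch_batch = _sketch_tokenizer(["who got the first nobel prize in physics"])
print(_sketch_batch.keys())  # typically input_ids and attention_mask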
| 54
| 1
|
class Graph:
    def __init__( self ) -> None:
        self.vertex = {}
    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i , """ -> """ , """ -> """.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex , to_vertex ) -> None:
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex , visited ) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=""" """ )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
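# For comparison, an equivalent iterative depth-first traversal of the same adjacency
# dict using an explicit stack (a standalone sketch, not part of the class above):
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they pop in insertion order
        stack.extend(reversed(graph.get(vertex, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]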
| 54
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
        # Check that the number of models matches README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
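# A tiny standalone sketch of the "# Copied from ..." marker convention these tests
# enforce. The regex below is an illustration, not the exact pattern check_copies uses:
import re

_COPY_RE = re.compile(r"^#\s*Copied from\s+(\S+)(\s+with\s+.+)?$")

_marker = "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel"
_match = _COPY_RE.match(_marker)
if _match:
    print("source object:", _match.group(1))
    print("rename suffix:", _match.group(2))  # the optional " with Old->New" part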
| 54
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
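# A condensed sketch of the lazy-import idea behind _LazyModule: attribute access
# triggers the real import via module-level __getattr__ (PEP 562). This illustrates
# the pattern only; it is not transformers' actual _LazyModule implementation, and
# the toy mapping below is an assumption:
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}  # submodule -> public names
_lazy_name_to_module = {name: mod for mod, names in _lazy_structure.items() for name in names}

def __getattr__(name):
    if name in _lazy_name_to_module:
        module = importlib.import_module(_lazy_name_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")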
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
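# A standalone sketch of the JSON save/load round trip the tests above cover, assuming
# the to_json_file/from_json_file helpers inherited from FeatureExtractionMixin:
import os
import tempfile
from transformers import TvltFeatureExtractor

with tempfile.TemporaryDirectory() as _sketch_tmpdir:
    _sketch_path = os.path.join(_sketch_tmpdir, "feat_extract.json")
    TvltFeatureExtractor().to_json_file(_sketch_path)
    _sketch_reloaded = TvltFeatureExtractor.from_json_file(_sketch_path)
print(type(_sketch_reloaded).__name__)  # TvltFeatureExtractor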
| 54
| 1
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
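    # Quick numeric check: water at roughly 998 kg/m^3 with a bulk modulus of about
    # 2.15e9 Pa should give a speed of sound near 1468 m/s (property values are approximate).
    print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))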
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# out_channels is double in_channels because the model predicts both the mean and the variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 1
|
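# Longest-recurring-cycle search (Project Euler 26 style): for each denominator up
# to the limit, simulate long division and record the remainders seen; once a
# remainder repeats, the number of distinct remainders collected is taken as the
# cycle length, and the denominator with the longest cycle is returned.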
def lowerCAmelCase_ ( snake_case_ = 1,snake_case_ = 1000 ):
_A : Optional[int] = 1
_A : List[Any] = 0
for divide_by_number in range(snake_case_,digit + 1 ):
_A : list[int] = []
_A : Any = numerator
for _ in range(1,digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(snake_case_ ):
_A : Optional[Any] = len(snake_case_ )
_A : Optional[Any] = divide_by_number
else:
has_been_divided.append(snake_case_ )
_A : Tuple = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
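# Project Euler 135: count n below the limit for which x^2 - y^2 - z^2 = n has
# exactly ten solutions, where x, y, z form a decreasing arithmetic progression.
# Writing the middle term as a and the common difference as d gives n = a(4d - a),
# so for each a and each multiple n of a, d = (a + n/a)/4 must be an integer with
# d < a (so z > 0) and a < 4d (so n > 0).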
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x, y, z are positive integers
frequency[n] += 1 # so z > 0 (a > d) and n > 0 (a < 4d)
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 1
|
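# Binomial coefficient C(n, r) via in-place Pascal's-triangle row updates: c[j]
# accumulates the current row, and iterating j downward avoids overwriting values
# from the previous row that are still needed. O(n * r) time, O(r) space.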
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
_A : Optional[Any] = 1
for i in range(1,n + 1 ):
# to compute current row from previous row.
_A : int = min(snake_case_,snake_case_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 54
|
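# In-graph GPT-2 tokenizer as a Keras layer: wraps keras-nlp's BytePairTokenizer,
# built from a Hugging Face tokenizer's vocab and merges, and optionally pads the
# token ids to max_length with pad_model_inputs so tokenization can run inside a
# TensorFlow serving graph.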
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
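# Project Euler 12: find the first triangular number with more than 500 divisors.
# The divisor count comes from the prime factorization: if n = p1^a1 * p2^a2 * ...,
# then n has (a1 + 1) * (a2 + 1) * ... divisors.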
def lowerCAmelCase_ ( ):
for n in range(1,1000000 ):
yield n * (n + 1) // 2
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = 1
_A : str = 2
while i * i <= n:
_A : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCAmelCase_ ( ):
return next(i for i in triangle_number_generator() if count_divisors(snake_case_ ) > 500 )
if __name__ == "__main__":
print(solution())
| 54
|
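# Tokenizer tests for Pegasus and BigBird-Pegasus: vocabulary layout, the special
# mask tokens (<mask_1> masks a whole sentence, <mask_2> a single word), parity
# between the slow and fast tokenizers, and seq2seq batch shapes under truncation.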
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
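# Precomputes per-example token lengths for the train and validation seq2seq
# datasets (counting non-pad tokens batch by batch) and pickles the results to
# each dataset's len_file, so later runs can bucket batches by length without
# re-tokenizing.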
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=1024,snake_case_=1024,snake_case_=False,**snake_case_ ):
_A : Tuple = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = SeqaSeqDataset(snake_case_,snake_case_,snake_case_,snake_case_,type_path="""train""",**snake_case_ )
_A : List[str] = tok.pad_token_id
def get_lens(snake_case_ ):
_A : Union[str, Any] = tqdm(
DataLoader(snake_case_,batch_size=512,num_workers=8,shuffle=snake_case_,collate_fn=ds.collate_fn ),desc=str(ds.len_file ),)
_A : Tuple = []
for batch in dl:
_A : Dict = batch["""input_ids"""].ne(snake_case_ ).sum(1 ).tolist()
_A : Tuple = batch["""labels"""].ne(snake_case_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(snake_case_,snake_case_ ):
max_lens.append(max(snake_case_,snake_case_ ) )
else:
max_lens.extend(snake_case_ )
return max_lens
_A : List[Any] = get_lens(snake_case_ )
_A : Any = SeqaSeqDataset(snake_case_,snake_case_,snake_case_,snake_case_,type_path="""val""",**snake_case_ )
_A : Dict = get_lens(snake_case_ )
pickle_save(snake_case_,train_ds.len_file )
pickle_save(snake_case_,val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 1
|
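# Configuration classes for AltCLIP: separate text and vision configs, plus a
# composite config whose __init__ reconciles the legacy `*_config_dict` kwargs
# with the newer nested config dicts, warning whenever the two disagree.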
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowercase ( UpperCamelCase__ ):
_a = "altclip_text_model"
def __init__( self , _a=25_0002 , _a=1024 , _a=24 , _a=16 , _a=4096 , _a="gelu" , _a=0.1 , _a=0.1 , _a=514 , _a=1 , _a=0.02 , _a=0.02 , _a=1e-05 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=768 , **_a , ) -> Optional[Any]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Optional[int] = vocab_size
_A : List[str] = hidden_size
_A : Optional[int] = num_hidden_layers
_A : str = num_attention_heads
_A : Dict = hidden_act
_A : Optional[Any] = intermediate_size
_A : Dict = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : str = max_position_embeddings
_A : Optional[int] = type_vocab_size
_A : Optional[int] = initializer_range
_A : Any = initializer_factor
_A : List[Any] = layer_norm_eps
_A : Dict = position_embedding_type
_A : int = use_cache
_A : Dict = project_dim
class lowercase ( UpperCamelCase__ ):
_a = "altclip_vision_model"
def __init__( self , _a=768 , _a=3072 , _a=512 , _a=12 , _a=12 , _a=3 , _a=224 , _a=32 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , **_a , ) -> Any:
super().__init__(**_a )
_A : Tuple = hidden_size
_A : Optional[int] = intermediate_size
_A : int = projection_dim
_A : Tuple = num_hidden_layers
_A : str = num_attention_heads
_A : Tuple = num_channels
_A : Union[str, Any] = patch_size
_A : List[str] = image_size
_A : Tuple = initializer_range
_A : Optional[int] = initializer_factor
_A : str = attention_dropout
_A : Dict = layer_norm_eps
_A : Dict = hidden_act
@classmethod
def a__ ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_A , _A : int = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
_A : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class lowercase ( UpperCamelCase__ ):
_a = "altclip"
_a = True
def __init__( self , _a=None , _a=None , _a=768 , _a=2.6592 , **_a ) -> int:
# If a `*_config_dict` kwarg exists, we use it for backward compatibility.
# We pop these two attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
_A : Union[str, Any] = kwargs.pop("""text_config_dict""" , _a )
_A : Optional[int] = kwargs.pop("""vision_config_dict""" , _a )
super().__init__(**_a )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in
# most cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_A : Optional[int] = {}
# This is the complete result when using `text_config_dict`.
_A : str = AltCLIPTextConfig(**_a ).to_dict()
# Give a warning if a value exists in both `_text_config_dict` and `text_config` but differs.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_A : Dict = (
F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
F'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_A : List[str] = (
F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
F'''value `text_config["{key}"]` will be overridden.'''
)
logger.warning(_a )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_A : Dict = {}
# This is the complete result when using `vision_config_dict`.
_A : Union[str, Any] = AltCLIPVisionConfig(**_a ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_A : List[Any] = {
str(_a ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if a value exists in both `_vision_config_dict` and `vision_config` but differs.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_A : int = (
F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_A : List[Any] = (
F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
F'''The value `vision_config["{key}"]` will be overridden.'''
)
logger.warning(_a )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_A : Any = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
_A : List[str] = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
_A : Tuple = AltCLIPTextConfig(**_a )
_A : List[Any] = AltCLIPVisionConfig(**_a )
_A : Optional[Any] = projection_dim
_A : Tuple = logit_scale_init_value
_A : Dict = 1.0
@classmethod
def a__ ( cls , _a , _a , **_a ) -> Tuple:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def a__ ( self ) -> Tuple:
_A : List[Any] = copy.deepcopy(self.__dict__ )
_A : List[Any] = self.text_config.to_dict()
_A : Dict = self.vision_config.to_dict()
_A : List[Any] = self.__class__.model_type
return output
| 54
|
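# Tests for Dataset.from_list: columns are inferred from the first record, missing
# columns are filled with None, feature types can be inferred from a later record
# when the first is ambiguous, and an empty list yields an empty dataset.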
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 1
|
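# Quick IPv4 dotted-quad check: split on ".", keep only numeric octets, then
# require exactly four of them, each in the range 0-254 (note this check rejects 255).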
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = [int(snake_case_ ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(snake_case_ ) == 4 and all(0 <= int(snake_case_ ) <= 254 for octet in octets )
if __name__ == "__main__":
_snake_case = input().strip()
_snake_case = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 54
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
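# Quine-McCluskey two-level logic minimization: compare_string merges two
# equal-length bit strings that differ in at most one position (the differing bit
# becomes "_"); check repeatedly merges terms until only prime implicants remain;
# decimal_to_binary renders minterms as fixed-width bit strings; and
# prime_implicant_chart / selection build a coverage table and pick the essential
# prime implicants.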
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 1
|
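# Model-parallelism helpers: the first function validates a device_map (every
# attention block assigned exactly once, no unknown blocks); the second splits
# n_layers into contiguous chunks of ceil(n_layers / n_devices) layers, one chunk
# per device.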
from math import ceil
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = list(range(0,snake_case_ ) )
_A : Optional[int] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_A : Optional[int] = []
for i in device_map_blocks:
if device_map_blocks.count(snake_case_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case_ )
# Missing blocks
_A : List[str] = [i for i in blocks if i not in device_map_blocks]
_A : Optional[Any] = [i for i in device_map_blocks if i not in blocks]
if len(snake_case_ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(snake_case_ ) )
if len(snake_case_ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(snake_case_ ) )
if len(snake_case_ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = list(range(snake_case_ ) )
_A : Tuple = int(ceil(n_layers / len(snake_case_ ) ) )
_A : int = [layers[i : i + n_blocks] for i in range(0,snake_case_,snake_case_ )]
return dict(zip(snake_case_,snake_case_ ) )
| 54
|
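# "All construct" dynamic programming: table[i] holds every way to build
# target[:i] from the word bank, and each matching word extends the combinations
# at position i to position i + len(word).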
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(snake_case_ ):
# only extend positions that are already constructible
if table[i] != []:
for word in word_bank:
# the word must match the target slice starting at i
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# add the word to every combination the current position holds,
# then push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 1
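# Seq2seq trainer variant: evaluate() and predict() pull max_length / num_beams
# from gen_kwargs (falling back to the training args), temporarily disable metric
# computation during the prediction loop, then post-process the predictions,
# compute metrics, and prefix every metric key with the metric_key_prefix.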
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , _a=None , _a=None , **_a ) -> Dict:
super().__init__(*_a , **_a )
_A : Optional[Any] = eval_examples
_A : int = post_process_function
def a__ ( self , _a = None , _a=None , _a = None , _a = "eval" , **_a , ) -> Dict[str, float]:
_A : Tuple = gen_kwargs.copy()
_A : int = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
_A : List[Any] = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
_A : str = gen_kwargs
_A : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
_A : List[str] = self.get_eval_dataloader(_a )
_A : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
_A : Optional[int] = self.compute_metrics
_A : Dict = None
_A : Any = time.time()
_A : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A : Union[str, Any] = eval_loop(
_a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
_A : List[Any] = compute_metrics
_A : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
_A : Dict = self.post_process_function(_a , _a , _a )
_A : List[Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A : Optional[int] = metrics.pop(_a )
metrics.update(output.metrics )
else:
_A : Optional[Any] = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a )
return metrics
def a__ ( self , _a , _a , _a=None , _a = "test" , **_a ) -> Union[str, Any]:
_A : List[str] = gen_kwargs.copy()
_A : Optional[int] = self.get_test_dataloader(_a )
# Temporarily disable metric computation; we will do it in the loop here.
_A : List[str] = self.compute_metrics
_A : str = None
_A : List[Any] = time.time()
_A : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A : Optional[Any] = eval_loop(
_a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
_A : Any = compute_metrics
_A : Dict = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A : List[str] = self.post_process_function(_a , _a , _a , """predict""" )
_A : Union[str, Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A : int = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
| 54
|
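# Strand sort: repeatedly peel an ordered "strand" (sublist) off the input and
# merge it into the running solution; the function recurses until the input is
# empty. The comparison operator flips when reverse=True.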
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 1
|
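# Dual-transformer block (used e.g. for dual text/image conditioning): two
# Transformer2D models each attend to their own slice of the condition tokens,
# their residual outputs are blended with mix_ratio, and the input states are
# added back at the end.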
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase ( nn.Module ):
def __init__( self , _a = 16 , _a = 88 , _a = None , _a = 1 , _a = 0.0 , _a = 32 , _a = None , _a = False , _a = None , _a = None , _a = "geglu" , _a = None , ) -> str:
super().__init__()
_A : Union[str, Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_a , attention_head_dim=_a , in_channels=_a , num_layers=_a , dropout=_a , norm_num_groups=_a , cross_attention_dim=_a , attention_bias=_a , sample_size=_a , num_vector_embeds=_a , activation_fn=_a , num_embeds_ada_norm=_a , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_A : Dict = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_A : Union[str, Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_A : Tuple = [1, 0]
def a__ ( self , _a , _a , _a=None , _a=None , _a=None , _a = True , ) -> List[Any]:
_A : List[Any] = hidden_states
_A : List[str] = []
_A : Optional[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_A : Tuple = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_A : List[str] = self.transformer_index_for_condition[i]
_A : int = self.transformers[transformer_index](
_a , encoder_hidden_states=_a , timestep=_a , cross_attention_kwargs=_a , return_dict=_a , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_A : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_A : Any = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_a )
| 54
|
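# Fill-mask pipeline tests: per-framework smoke tests on tiny models, an fp16
# GPU round-trip check, and shared helpers exercising top_k, explicit targets,
# their combination, duplicate targets, and inputs with multiple mask tokens.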
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# free as much GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result; we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
outputs , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(ValueError ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(ValueError ):
fill_masker("""This is""" )
self.run_test_top_k(model , tokenizer )
self.run_test_targets(model , tokenizer )
self.run_test_top_k_targets(model , tokenizer )
self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
self.fill_mask_with_multiple_masks(model , tokenizer )
def run_test_targets( self , model , tokenizer ) -> List[str]:
vocab = tokenizer.get_vocab()
targets = sorted(vocab.keys() )[:2]
# Pipeline argument
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
outputs , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , target_ids )
processed_targets = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(processed_targets ) )
# Call argument
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=targets )
self.assertEqual(
outputs , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , target_ids )
processed_targets = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(processed_targets ) )
# Score equivalence
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=targets )
tokens = [top_mask["""token_str"""] for top_mask in outputs]
scores = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(tokens ) == set(targets ):
unmasked_targets = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=targets )
target_scores = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
# Raises with invalid
with self.assertRaises(ValueError ):
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(ValueError ):
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(ValueError ):
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def run_test_top_k( self , model , tokenizer ) -> Optional[Any]:
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
outputs , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
outputs2 = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
outputs2 , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
def run_test_top_k_targets( self , model , tokenizer ) -> List[Any]:
vocab = tokenizer.get_vocab()
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
# top_k=2, ntargets=3
targets = sorted(vocab.keys() )[:3]
outputs = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=targets )
# If we use the most probable targets, and filter differently, we should still
# have the same results
targets2 = [el["""token_str"""] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(targets2 ).issubset(vocab ):
unmasked_targets = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=targets2 )
# They should yield exactly the same result
self.assertEqual(nested_simplify(outputs ) , nested_simplify(unmasked_targets ) )
def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ) -> str:
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
targets = sorted(vocab.keys() )[:3]
targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
outputs = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=targets , top_k=10 )
# The target list contains duplicates, so the pipeline can't return
# more results than the number of unique targets
self.assertEqual(len(outputs ) , 3 )
def fill_mask_with_multiple_masks( self , model , tokenizer ) -> Tuple:
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
outputs = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
outputs , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 1
|
def calc_profit ( profit,weight,max_weight ):
if len(profit ) != len(weight ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must be greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
profit_by_weight = [p / w for p, w in zip(profit,weight )]
# Creating a copy of the list and sorting profit/weight in ascending order
sorted_profit_by_weight = sorted(profit_by_weight )
# declaring useful variables
length = len(sorted_profit_by_weight )
limit = 0
gain = 0
i = 0
# loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
# flag out the greatest remaining element in sorted_profit_by_weight so it is not picked again
biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
index = profit_by_weight.index(biggest_profit_by_weight )
sorted_profit_by_weight[length - i - 1] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# add the full profit for this item, since
# weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
_snake_case = [int(x) for x in input("Input profits separated by spaces: ").split()]
_snake_case = [int(x) for x in input("Input weights separated by spaces: ").split()]
_snake_case = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
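# A quick sanity check (hypothetical values, not part of the original script):
# calc_profit([1, 2, 3], [3, 4, 5], 15) packs all three items (12 kg in total)
# and returns 6, since everything fits under the 15 kg limit.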
| 54
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest ( TokenizerTesterMixin,unittest.TestCase ):
tokenizer_class = CLIPTokenizer
rust_tokenizer_class = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def setUp( self ) -> None:
super().setUp()
# fmt: off
vocab = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
self.special_tokens_map = {"""unk_token""": """<unk>"""}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(merges ) )
def get_tokenizer( self , **kwargs ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_rust_tokenizer( self , **kwargs ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ) -> str:
input_text = """lower newer"""
output_text = """lower newer"""
return input_text, output_text
def test_full_tokenizer( self ) -> List[Any]:
tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = """lower newer"""
bpe_tokens = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
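# the expected ids follow the toy vocab defined in setUp:
# "lo" -> 10, "w" -> 2, "er</w>" -> 16, "n" -> 9, "e" -> 3, "<unk>" -> 20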
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
text = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
tokens_s = tokenizer_s.tokenize(text )
tokens_r = tokenizer_r.tokenize(text )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
spaces_unicodes = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
tokens_s = tokenizer_s.tokenize(unicode_seq )
tokens_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
line_break_unicodes = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
tokens_s = tokenizer_s.tokenize(unicode_seq )
tokens_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(tokens_s , tokens_r )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(ValueError ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def test_tokenization_python_rust_equals( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 1
|
def _modexpt ( base,exponent,modulo_value ):
if exponent == 1:
return base
if exponent % 2 == 0:
x = _modexpt(base,exponent // 2,modulo_value ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(base,exponent - 1,modulo_value )) % modulo_value
def solution ( base = 1777,height = 1855,digits = 8 ):
result = base
for _ in range(1,height ):
result = _modexpt(base,result,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main ( ):
g = Github(os.environ["""GITHUB_TOKEN"""] )
repo = g.get_repo("""huggingface/transformers""" )
open_issues = repo.get_issues(state="""open""" )
for issue in open_issues:
comments = sorted([comment for comment in issue.get_comments()],key=lambda i : i.created_at,reverse=True )
last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
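# To exercise the check locally (script name illustrative; requires a token
# with read access to the repository):
# GITHUB_TOKEN=<token> python stale.py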
| 54
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device("cpu")
def prepare_img ( ):
_A : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
def get_expected_output ( swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key ( dct,old,new ):
val = dct.pop(old )
dct[new] = val
def create_rename_keys ( state_dict ):
rename_keys = []
for k in state_dict.keys():
k_new = k
if ".pwconv" in k:
k_new = k_new.replace(""".pwconv""",""".point_wise_conv""" )
if ".dwconv" in k:
k_new = k_new.replace(""".dwconv""",""".depth_wise_conv""" )
if ".Proj." in k:
k_new = k_new.replace(""".Proj.""",""".proj.""" )
if "patch_embed" in k_new:
k_new = k_new.replace("""patch_embed""","""swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
ls = k_new.split(""".""" )
if ls[2].isdigit():
k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
k_new = k_new.replace("""network""","""swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint ( swiftformer_name,pytorch_dump_folder_path,original_ckpt ):
config = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
config.num_labels = 1000
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id,filename,repo_type="""dataset""" ),"""r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
config.depths = [3, 3, 6, 4]
config.embed_dims = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
config.depths = [3, 3, 9, 6]
config.embed_dims = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
config.depths = [4, 3, 10, 5]
config.embed_dims = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
config.depths = [4, 4, 12, 6]
config.embed_dims = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
checkpoint = torch.hub.load_state_dict_from_url(original_ckpt,map_location="""cpu""",check_hash=True )
else:
checkpoint = torch.load(original_ckpt,map_location="""cpu""" )
state_dict = checkpoint
rename_keys = create_rename_keys(state_dict )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(state_dict,rename_key_src,rename_key_dest )
# load HuggingFace model
hf_model = SwiftFormerForImageClassification(config ).eval()
hf_model.load_state_dict(state_dict )
# prepare test inputs
image = prepare_img()
processor = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
inputs = processor(images=image,return_tensors="""pt""" )
# compare outputs from both models
timm_logits = get_expected_output(swiftformer_name )
hf_logits = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5],timm_logits,atol=1e-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
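# Example invocation (script and checkpoint paths are illustrative):
# python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth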
| 54
|
from __future__ import annotations
class XORCipher:
def __init__( self , key = 0 ) -> None:
self.__key = key
def encrypt( self , content , key ) -> list[str]:
assert isinstance(key , int ) and isinstance(content , str )
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(ch ) ^ key ) for ch in content]
def decrypt( self , content , key ) -> list[str]:
assert isinstance(key , int ) and isinstance(content , list )
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(ch ) ^ key ) for ch in content]
def encrypt_string( self , content , key = 0 ) -> str:
assert isinstance(key , int ) and isinstance(content , str )
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = """"""
for ch in content:
ans += chr(ord(ch ) ^ key )
return ans
def decrypt_string( self , content , key = 0 ) -> str:
assert isinstance(key , int ) and isinstance(content , str )
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = """"""
for ch in content:
ans += chr(ord(ch ) ^ key )
return ans
def encrypt_file( self , file , key = 0 ) -> bool:
assert isinstance(file , str ) and isinstance(key , int )
try:
with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(line , key ) )
except OSError:
return False
return True
def decrypt_file( self , file , key ) -> bool:
assert isinstance(file , str ) and isinstance(key , int )
try:
with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(line , key ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 1
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs ( model ):
original_config = model.config
encoder_config = DonutSwinConfig(
image_size=original_config.input_size,patch_size=4,depths=original_config.encoder_layer,num_heads=[4, 8, 16, 32],window_size=original_config.window_size,embed_dim=128,)
decoder_config = MBartConfig(
is_decoder=True,is_encoder_decoder=False,add_cross_attention=True,decoder_layers=original_config.decoder_layer,max_position_embeddings=original_config.max_position_embeddings,vocab_size=len(
model.decoder.tokenizer ),scale_embedding=True,add_final_layer_norm=True,)
return encoder_config, decoder_config
def rename_key ( name ):
if "encoder.model" in name:
_A : Tuple = name.replace("""encoder.model""","""encoder""" )
if "decoder.model" in name:
_A : List[Any] = name.replace("""decoder.model""","""decoder""" )
if "patch_embed.proj" in name:
_A : str = name.replace("""patch_embed.proj""","""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_A : int = name.replace("""patch_embed.norm""","""embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
_A : Dict = """encoder.""" + name
if "attn.proj" in name:
_A : Dict = name.replace("""attn.proj""","""attention.output.dense""" )
if "attn" in name and "mask" not in name:
_A : Tuple = name.replace("""attn""","""attention.self""" )
if "norm1" in name:
_A : str = name.replace("""norm1""","""layernorm_before""" )
if "norm2" in name:
_A : List[str] = name.replace("""norm2""","""layernorm_after""" )
if "mlp.fc1" in name:
_A : Union[str, Any] = name.replace("""mlp.fc1""","""intermediate.dense""" )
if "mlp.fc2" in name:
_A : Dict = name.replace("""mlp.fc2""","""output.dense""" )
if name == "encoder.norm.weight":
_A : Any = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
_A : Dict = """encoder.layernorm.bias"""
return name
def convert_state_dict ( orig_state_dict,model ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split(""".""" )
layer_num = int(key_split[3] )
block_num = int(key_split[5] )
dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_A : Optional[int] = val[:dim, :]
_A : List[Any] = val[dim : dim * 2, :]
_A : Any = val[-dim:, :]
else:
_A : Union[str, Any] = val[:dim]
_A : Optional[Any] = val[dim : dim * 2]
_A : Tuple = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def convert_donut_checkpoint ( model_name,pytorch_dump_folder_path=None,push_to_hub=False ):
# load original model
original_model = DonutModel.from_pretrained(model_name ).eval()
# load HuggingFace model
encoder_config , decoder_config = get_configs(original_model )
encoder = DonutSwinModel(encoder_config )
decoder = MBartForCausalLM(decoder_config )
model = VisionEncoderDecoderModel(encoder=encoder,decoder=decoder )
model.eval()
state_dict = original_model.state_dict()
new_state_dict = convert_state_dict(state_dict,model )
model.load_state_dict(new_state_dict )
# verify results on scanned document
_A : List[str] = load_dataset("""hf-internal-testing/example-documents""" )
_A : Any = dataset["""test"""][0]["""image"""].convert("""RGB""" )
_A : str = XLMRobertaTokenizerFast.from_pretrained(snake_case_,from_slow=snake_case_ )
_A : Tuple = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis,size=original_model.config.input_size[::-1] )
_A : Optional[Any] = DonutProcessor(snake_case_,snake_case_ )
_A : List[str] = processor(snake_case_,return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_A : List[Any] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_A : Any = """When is the coffee break?"""
_A : Union[str, Any] = task_prompt.replace("""{user_input}""",snake_case_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_A : List[str] = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_A : str = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_A : Union[str, Any] = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_A : List[Any] = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_A : List[Any] = """hello world"""
else:
raise ValueError("""Model name not supported""" )
_A : Dict = original_model.decoder.tokenizer(snake_case_,add_special_tokens=snake_case_,return_tensors="""pt""" )[
"""input_ids"""
]
_A : List[Any] = original_model.encoder.model.patch_embed(snake_case_ )
_A , _A : Any = model.encoder.embeddings(snake_case_ )
assert torch.allclose(snake_case_,snake_case_,atol=1e-3 )
# verify encoder hidden states
_A : Optional[Any] = original_model.encoder(snake_case_ )
_A : Union[str, Any] = model.encoder(snake_case_ ).last_hidden_state
assert torch.allclose(snake_case_,snake_case_,atol=1e-2 )
# verify decoder hidden states
_A : List[str] = original_model(snake_case_,snake_case_,snake_case_ ).logits
_A : Union[str, Any] = model(snake_case_,decoder_input_ids=snake_case_ ).logits
assert torch.allclose(snake_case_,snake_case_,atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1],commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1],commit_message="""Update model""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
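# Example invocation (script name illustrative; hub access is only needed with --push_to_hub):
# python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#     --pytorch_dump_folder_path ./donut-base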
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list ( shape,scale=1.0,rng=None,name=None ):
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
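# e.g. floats_list((2, 3)) yields two lists of three floats drawn from [0.0, 1.0)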
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester ( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> None:
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
self.feature_size = feature_size
self.chunk_length = chunk_length
self.hop_length = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest ( SequenceFeatureExtractionTestMixin,unittest.TestCase ):
feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
def setUp( self ) -> None:
self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
check_json_file_has_correct_format(saved_file )
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1 , mel_2 ) )
self.assertEqual(dict_first , dict_second )
def a__ ( self ) -> Dict:
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
feat_extract_first.to_json_file(json_file_path )
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1 , mel_2 ) )
self.assertEqual(dict_first , dict_second )
def a__ ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float32 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def _load_datasamples( self , num_samples ) -> Dict:
ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
input_speech = self._load_datasamples(1 )
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
def a__ ( self ) -> str:
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
audio = self._load_datasamples(1 )[0]
audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config ( PretrainedConfig ):
model_type = "swinv2"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ) -> None:
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
self.pretrained_window_sizes = (0, 0, 0, 0)
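# example: with the defaults above, hidden_size = int(96 * 2 ** (4 - 1)) = 768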
| 54
|
def apply_table ( inp,table ):
res = """"""
for i in table:
res += inp[i - 1]
return res
def left_shift ( data ):
return data[1:] + data[0]
def xor ( a,b ):
res = """"""
for i in range(len(a ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def apply_sbox ( s,data ):
row = int("""0b""" + data[0] + data[-1],2 )
col = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def function ( expansion,s0,s1,key,message ):
left = message[:4]
right = message[4:]
temp = apply_table(right,expansion )
temp = xor(temp,key )
l = apply_sbox(s0,temp[:4] ) # noqa: E741
r = apply_sbox(s1,temp[4:] )
l = "0" * (2 - len(l )) + l # noqa: E741
r = "0" * (2 - len(r )) + r
temp = apply_table(l + r,p4_table )
temp = xor(left,temp )
return temp + right
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 1
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_mobilenet_va_config ( model_name ):
config = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""",model_name )
if matches:
config.depth_multiplier = float(matches[1] )
config.image_size = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
config.num_labels = 1001
filename = """imagenet-1k-id2label.json"""
repo_id = """huggingface/label-files"""
id2label = json.load(open(hf_hub_download(repo_id,filename,repo_type="""dataset""" ),"""r""" ) )
id2label = {int(k ) + 1: v for k, v in id2label.items()}
id2label[0] = """background"""
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def prepare_img ( ):
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url,stream=True ).raw )
return im
@torch.no_grad()
def convert_movilevit_checkpoint ( model_name,checkpoint_path,pytorch_dump_folder_path,push_to_hub=False ):
config = get_mobilenet_va_config(model_name )
# Load 🤗 model
model = MobileNetVaForImageClassification(config ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(model,config,checkpoint_path )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
image_processor = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size},size={"""shortest_edge""": config.image_size + 32},)
inputs = image_processor(images=prepare_img(),return_tensors="""pt""" )
outputs = model(**inputs )
logits = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
expected_logits = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
expected_logits = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
expected_logits = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3],expected_logits,atol=1e-4 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing to the hub...""" )
repo_id = """google/""" + model_name
image_processor.push_to_hub(repo_id )
model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
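# Example invocation (paths are illustrative):
# python convert_original_tf_checkpoint_to_pytorch.py --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./out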
| 54
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester ( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
size = size if size is not None else {"""height""": 18, """width""": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest ( ImageProcessingSavingTestMixin,unittest.TestCase ):
image_processing_class = DPTImageProcessor if is_vision_available() else None
def setUp( self ) -> None:
self.image_processor_tester = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 54
| 1
|
from math import sqrt
def sum_of_divisors ( n ):
total = 0
for i in range(1,int(sqrt(n ) + 1 ) ):
if n % i == 0 and i != sqrt(n ):
total += i + n // i
elif i == sqrt(n ):
total += i
return total - n
def solution ( n = 10000 ):
total = sum(
i
for i in range(1,n )
if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
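# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220 (the classic
# amicable pair), so both 220 and 284 are counted by solution()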
| 54
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
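# illustrative use of the mapping above:
# scheduler = arg_to_scheduler["linear"](optimizer, num_warmup_steps=0, num_training_steps=1000)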
class Seq2SeqTrainer ( Trainer ):
def __init__( self , config=None , data_args=None , *args , **kwargs ) -> None:
super().__init__(*args , **kwargs )
if config is None:
assert isinstance(self.model , PreTrainedModel ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
self.config = self.model.config
else:
self.config = config
self.data_args = data_args
self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
self.loss_fn = label_smoothed_nll_loss
def create_optimizer_and_scheduler( self , num_training_steps ) -> None:
if self.optimizer is None:
no_decay = ["""bias""", """LayerNorm.weight"""]
optimizer_grouped_parameters = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"""betas""": (self.args.adam_beta1, self.args.adam_beta2),
"""eps""": self.args.adam_epsilon,
}
optimizer_kwargs["""lr"""] = self.args.learning_rate
if self.sharded_ddp:
self.optimizer = OSS(
params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
if self.lr_scheduler is None:
self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def _get_lr_scheduler( self , num_training_steps ) -> Dict:
schedule_func = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
scheduler = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
scheduler = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
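        # Shape sketch (sizes are illustrative assumptions): a (2, 5) tensor of
        # token ids padded to max_length=8 comes back as a (2, 8) tensor whose
        # last three columns hold `pad_token_id`.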
| 54
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowercase ( UpperCamelCase__ ):
_a = "Wav2Vec2FeatureExtractor"
_a = "AutoTokenizer"
def __init__( self , _a , _a ) -> List[str]:
super().__init__(_a , _a )
_A : int = self.feature_extractor
_A : Optional[int] = False
@classmethod
def a__ ( cls , _a , **_a ) -> str:
try:
return super().from_pretrained(_a , **_a )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , _a , )
_A : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_a , **_a )
_A : str = WavaVecaCTCTokenizer.from_pretrained(_a , **_a )
return cls(feature_extractor=_a , tokenizer=_a )
def __call__( self , *_a , **_a ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
_A : Dict = kwargs.pop("""raw_speech""" )
else:
_A : int = kwargs.pop("""audio""" , _a )
_A : str = kwargs.pop("""sampling_rate""" , _a )
_A : Tuple = kwargs.pop("""text""" , _a )
if len(_a ) > 0:
_A : Dict = args[0]
_A : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_A : Optional[Any] = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )
if text is not None:
_A : str = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_A : Dict = encodings["""input_ids"""]
return inputs
def a__ ( self , *_a , **_a ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_a , **_a )
_A : Optional[Any] = kwargs.pop("""input_features""" , _a )
_A : Dict = kwargs.pop("""labels""" , _a )
if len(_a ) > 0:
_A : Union[str, Any] = args[0]
_A : List[Any] = args[1:]
if input_features is not None:
_A : Dict = self.feature_extractor.pad(_a , *_a , **_a )
if labels is not None:
_A : List[Any] = self.tokenizer.pad(_a , **_a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_A : List[str] = labels["""input_ids"""]
return input_features
def a__ ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def a__ ( self ) -> Optional[Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
_A : List[str] = True
_A : Optional[int] = self.tokenizer
yield
_A : List[str] = self.feature_extractor
_A : str = False
| 54
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
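# The estimator above uses the identity integral_a^b f(x) dx ~= (b - a) * mean(f(U))
# with U uniform on [a, b]; here a and b are `min_value` and `max_value`.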
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
| 1
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = len(snake_case_ )
print("""The following activities are selected:""" )
# The first activity is always selected
_A : Tuple = 0
print(snake_case_,end=""",""" )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_,end=""",""" )
_A : Optional[int] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = [1, 3, 0, 5, 8, 5]
_snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 54
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
| 54
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "AutoImageProcessor"
_a = "AutoTokenizer"
def __init__( self , _a , _a ) -> Dict:
super().__init__(_a , _a )
_A : Tuple = self.image_processor
def __call__( self , _a=None , _a=None , _a=None , **_a ) -> List[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_A : Dict = self.tokenizer(_a , return_tensors=_a , **_a )
if images is not None:
_A : Union[str, Any] = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> Optional[int]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[int]:
return ["input_ids", "attention_mask", "pixel_values"]
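# Usage sketch (instantiation details are assumptions): calling the processor
# with both `text` and `images` returns a single encoding carrying input_ids,
# attention_mask and pixel_values, matching the model input names above.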
| 54
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        _A : List[str] = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
| 54
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_snake_case = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
_snake_case = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("utf-8").split()
_snake_case = "|".join(sys.argv[1:])
_snake_case = re.compile(rf"""^({joined_dirs}).*?\.py$""")
_snake_case = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
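# Example (sketch): floats_list((2, 3)) yields a 2x3 nested list of floats from
# `global_rng`, each scaled by `scale` (default 1.0, i.e. values in [0, 1)).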
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 54
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = os.path.join(args.tf_model_dir,"""parameters.json""" )
_A : Optional[Any] = json.loads(open(snake_case_ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
_A : Union[str, Any] = args.output + """.pt"""
_A : List[Any] = OrderedDict()
with tf.device("""/CPU:0""" ):
_A : Optional[Any] = tf.train.load_checkpoint(args.tf_model_dir )
_A : Optional[int] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
            _A : Dict = reader.get_tensor(snake_case_ ).astype(np.float32 )  # dtype is an assumption
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
_A : List[Any] = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
_A : List[str] = 8
_A : Dict = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A : List[str] = torch.tensor(snake_case_ )
elif key_name.startswith("""model/moe""" ):
_A : Optional[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
_A : Optional[int] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
_A : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A : int = torch.tensor(snake_case_ )
elif key_name.endswith("""/softmlp/kernel""" ):
_A : Optional[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
_A : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A : Optional[int] = torch.tensor(snake_case_ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
_A : List[str] = key_name[-9:-7]
for i in range(16 ):
_A : Dict = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
_A : Optional[int] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A : Optional[Any] = torch.tensor(snake_case_ )
elif key_name.startswith("""model/mlp""" ):
_A : Union[str, Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
_A : Tuple = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
_A : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A : List[str] = torch.tensor(snake_case_ )
elif key_name.endswith("""/p1/bias""" ):
_A : List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
_A : Optional[Any] = vnp.copy() # same because it is one dimensional
_A : Optional[Any] = torch.tensor(snake_case_ )
elif key_name.endswith("""/p2/kernel""" ):
_A : Optional[Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
_A : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A : int = torch.tensor(snake_case_ )
elif key_name.endswith("""/p2/bias""" ):
_A : List[Any] = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
_A : List[str] = vnp.copy() # same because it is one dimensional
_A : List[str] = torch.tensor(snake_case_ )
elif key_name.startswith("""model/ln""" ):
_A : Tuple = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_A : Any = """model.blocks.%d.feed_forward.norm.bias""" % player
_A : Optional[int] = vnp.copy() # same because it is one dimensional
_A : str = torch.tensor(snake_case_ )
elif key_name.endswith("""/g""" ):
_A : int = """model.blocks.%d.feed_forward.norm.weight""" % player
_A : List[Any] = vnp.copy() # same because it is one dimensional
_A : Tuple = torch.tensor(snake_case_ )
elif key_name.startswith("""model/att""" ):
_A : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
_A : Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A : List[Any] = state[:, 0, :, :]
_A : Any = state[:, 1, :, :]
_A : int = state[:, 2, :, :]
_A : List[str] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A : Any = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
_A : Optional[Any] = torch.tensor(snake_case_ )
_A : Optional[int] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
_A : int = torch.tensor(snake_case_ )
_A : Optional[int] = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
_A : Any = torch.tensor(snake_case_ )
elif key_name.endswith("""/o/kernel""" ):
_A : int = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
_A : int = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A : List[str] = torch.tensor(snake_case_ )
elif key_name.startswith("""model/an""" ):
_A : Dict = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_A : Optional[Any] = """model.blocks.%d.self_attn.norm.bias""" % player
_A : Dict = vnp.copy() # same because it is one dimensional
_A : int = torch.tensor(snake_case_ )
elif key_name.endswith("""/g""" ):
_A : Tuple = """model.blocks.%d.self_attn.norm.weight""" % player
_A : Union[str, Any] = vnp.copy() # same because it is one dimensional
_A : Optional[Any] = torch.tensor(snake_case_ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
_A : Union[str, Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
_A : Dict = """model.%s.weight""" % nlayer
_A : Tuple = vnp.copy() # same in embedded
_A : Union[str, Any] = torch.tensor(snake_case_ )
if key_name.startswith("""model/wte""" ):
_A : Dict = """lm_head.weight"""
_A : Union[str, Any] = vnp.copy() # same in embedded
_A : Optional[int] = torch.tensor(snake_case_ )
elif key_name.startswith("""model/wob""" ):
_A : Union[str, Any] = """final_logits_bias"""
_A : Tuple = vnp.copy() # same in embedded
_A : Optional[Any] = state.reshape((1, -1) )
_A : str = torch.tensor(snake_case_ )
elif key_name == "model/dense/kernel":
_A : Dict = """model.last_project.weight"""
_A : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A : Optional[Any] = torch.tensor(snake_case_ )
elif key_name == "model/dense_1/bias":
_A : Dict = """model.last_project.bias"""
_A : Union[str, Any] = vnp.copy() # same because it is one dimensional
_A : List[str] = torch.tensor(snake_case_ )
torch.save(snake_case_,args.output )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
_snake_case = parser.parse_args()
convert_tf_gptsan_to_pt(args)
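    # Example invocation (script filename and paths are placeholders):
    #   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt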
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
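# Effect of the lazy module: the heavy submodules (and their torch dependency)
# are only imported when an attribute such as `TimeSeriesTransformerConfig` is
# first accessed, not at package import time.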
| 54
| 1
|
from scipy.stats import spearmanr
import datasets
_snake_case = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_snake_case = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
_snake_case = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def a__ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def a__ ( self , _a , _a , _a=False ) -> Dict:
_A : int = spearmanr(_a , _a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _A : Optional[Any] = Image.fromarray(np.uint8(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
        _A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(_a )
        _A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.float16 )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 1
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCAmelCase_ ( snake_case_ ):
_A : str = []
for line in lines:
_A : Dict = re.sub(r"""#.*""","""""",snake_case_ ) # remove comments
if line:
filtered_lines.append(snake_case_ )
_A : Dict = """\n""".join(snake_case_ )
# Make a hash from all this code
_A : Dict = full_str.encode("""utf-8""" )
    return sha256(snake_case_ ).hexdigest()
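# Example (sketch): pure-comment and blank lines are stripped before hashing, so
#   _hash_python_lines(["import csv", "# a comment", ""]) == _hash_python_lines(["import csv"])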
# get importable module names and hash for caching
_snake_case = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_snake_case = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_snake_case = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_snake_case = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
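# Lookup sketch: a ".tsv" file resolves to the csv builder with a tab separator:
#   module_name, builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]  # ("csv", {"sep": "\t"})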
| 54
|
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
    for first_term in range(1,limit ):
        for n in range(first_term,limit,first_term ):
            _A : Optional[int] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also a<4d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
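# Why this works (sketch): for the arithmetic progression x = a + d, y = a,
# z = a - d one has x**2 - y**2 - z**2 = a * (4 * d - a) = n. Hence n is a
# multiple of the first term a, (a + n / a) equals 4 * d (the divisibility
# check above), and positivity of z and n forces d < a < 4 * d.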
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 1
|
import argparse
import os
import re
import packaging.version
_snake_case = "examples/"
_snake_case = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
_snake_case = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
_snake_case = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
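# Example invocations, a sketch assuming the script lives at utils/release.py
# in the transformers repo (the path is an assumption, not taken from this file):
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # switch back to a .dev0 version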
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_snake_case = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# prep
assert os.path.exists(snake_case_ )
os.makedirs(snake_case_,exist_ok=snake_case_ )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_A : Any = basename(snake_case_ )
_A : Dict = dirname(snake_case_ )
_A : str = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_A : Tuple = cls.hub_models()
_A : str = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
_A : Tuple = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
_A : List[str] = hub_utils.from_pretrained(
snake_case_,snake_case_,snake_case_,archive_map=snake_case_,**snake_case_ )
_A : Tuple = vars(chkpt["""args"""]["""model"""] )
_A : List[Any] = args["""source_lang"""]
_A : Optional[Any] = args["""target_lang"""]
_A : List[str] = dirname(snake_case_ )
_A : Optional[Any] = basename(snake_case_ )
# dicts
_A : List[Any] = os.path.join(snake_case_,f'''dict.{src_lang}.txt''' )
_A : List[Any] = os.path.join(snake_case_,f'''dict.{tgt_lang}.txt''' )
_A : Optional[int] = Dictionary.load(snake_case_ )
_A : Any = rewrite_dict_keys(src_dict.indices )
_A : int = len(snake_case_ )
_A : int = os.path.join(snake_case_,"""vocab-src.json""" )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,ensure_ascii=snake_case_,indent=snake_case_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_A : int = True
for k in src_vocab.keys():
if not k.islower():
_A : str = False
break
_A : List[str] = Dictionary.load(snake_case_ )
_A : Tuple = rewrite_dict_keys(tgt_dict.indices )
_A : Union[str, Any] = len(snake_case_ )
_A : str = os.path.join(snake_case_,"""vocab-tgt.json""" )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,ensure_ascii=snake_case_,indent=snake_case_ ) )
# merges_file (bpecodes)
_A : Optional[int] = os.path.join(snake_case_,VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_A : Union[str, Any] = os.path.join(snake_case_,snake_case_ )
if os.path.exists(snake_case_ ):
break
with open(snake_case_,encoding="""utf-8""" ) as fin:
_A : Any = fin.read()
_A : Optional[int] = re.sub(r""" \d+$""","""""",snake_case_,0,re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as fout:
fout.write(snake_case_ )
# model config
_A : List[Any] = os.path.join(snake_case_,"""config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
_A : Tuple = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
_A : int = 5
_A : int = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_A : Tuple = best_score_hparams[model_dir]["""length_penalty"""]
else:
_A : Optional[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,ensure_ascii=snake_case_,indent=snake_case_ ) )
# tokenizer config
_A : Dict = os.path.join(snake_case_,snake_case_ )
_A : Any = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,ensure_ascii=snake_case_,indent=snake_case_ ) )
# model
_A : Tuple = chkpt["""models"""][0]
_A : Union[str, Any] = model.state_dict()
# rename keys to start with 'model.'
_A : Union[str, Any] = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_A : Optional[int] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(snake_case_,snake_case_ )
_A : Union[str, Any] = FSMTConfig.from_pretrained(snake_case_ )
_A : Any = FSMTForConditionalGeneration(snake_case_ )
# check that it loads ok
model_new.load_state_dict(snake_case_,strict=snake_case_ )
# save
_A : str = os.path.join(snake_case_,snake_case_ )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
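# Example invocation (the script name and checkpoint path below are illustrative):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en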
| 54
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
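# Heap's algorithm: generate every permutation of a list, producing each new
# permutation from the previous one by a single pair swap.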
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 1
|
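# Strand sort: repeatedly pull an ascending (or descending, when reverse=True)
# "strand" out of the remaining input and merge it into the solution list.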
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> int:
_A : Tuple = tempfile.mkdtemp()
_A : List[str] = BlipImageProcessor()
        _A : List[Any] = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A : List[str] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A : Any = InstructBlipProcessor(_a , _a , _a )
processor.save_pretrained(self.tmpdirname )
def a__ ( self , **_a ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def a__ ( self , **_a ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def a__ ( self , **_a ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).qformer_tokenizer
def a__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Optional[Any]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A : str = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_A : Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
self.assertIsInstance(processor.qformer_tokenizer , _a )
def a__ ( self ) -> Any:
_A : Tuple = self.get_image_processor()
_A : List[str] = self.get_tokenizer()
_A : Tuple = self.get_qformer_tokenizer()
_A : Optional[int] = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Union[str, Any] = self.prepare_image_inputs()
_A : str = image_processor(_a , return_tensors="""np""" )
_A : Optional[int] = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self ) -> Any:
_A : Optional[int] = self.get_image_processor()
_A : Tuple = self.get_tokenizer()
_A : Any = self.get_qformer_tokenizer()
_A : str = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Union[str, Any] = """lower newer"""
_A : Dict = processor(text=_a )
_A : Optional[int] = tokenizer(_a , return_token_type_ids=_a )
_A : int = qformer_tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def a__ ( self ) -> Tuple:
_A : Dict = self.get_image_processor()
_A : str = self.get_tokenizer()
_A : List[Any] = self.get_qformer_tokenizer()
_A : str = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Any = """lower newer"""
_A : Union[str, Any] = self.prepare_image_inputs()
_A : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def a__ ( self ) -> str:
_A : str = self.get_image_processor()
_A : List[Any] = self.get_tokenizer()
_A : List[Any] = self.get_qformer_tokenizer()
_A : Dict = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A : List[Any] = processor.batch_decode(_a )
_A : List[str] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.get_image_processor()
_A : Union[str, Any] = self.get_tokenizer()
_A : int = self.get_qformer_tokenizer()
_A : Dict = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Union[str, Any] = """lower newer"""
_A : Any = self.prepare_image_inputs()
_A : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 54
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
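# Quine-McCluskey tabulation method: repeatedly merge minterms that differ in a
# single bit (replacing it with "_") to find prime implicants, then pick the
# essential ones off a coverage chart.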
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # pick essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining columns with the row covering the most of them
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 1
|
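# A* search on a small grid world: each cell tracks g (cost from the start),
# h (squared straight-line distance to the goal, used as the heuristic) and
# f = g + h; the open list is always expanded at its minimum-f cell.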
import numpy as np
class Cell:
    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell) -> bool:
        return self.position == cell.position
    def showcell(self) -> None:
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self) -> None:
        print(self.w)
    def get_neighbours(self, cell):
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"""path from {start.position} to {goal.position}""")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
print(world.w)
| 54
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
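# Worked example: all_construct("abc", ["a", "b", "c", "ab"]) builds
# table[1] = [["a"]], table[2] = [["ab"], ["b", "a"]] and finally returns
# [["ab", "c"], ["a", "b", "c"]] after reversing each combination.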
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 1
|
import re
from filelock import FileLock
try:
import nltk
_snake_case = True
except (ImportError, ModuleNotFoundError):
_snake_case = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 54
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 1
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
_A : List[Any] = os.path.abspath(snake_case_ )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
_A : Union[str, Any] = torch.load(snake_case_,map_location="""cpu""" )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
_A : List[str] = convert_pytorch_state_dict_to_flax(snake_case_,snake_case_ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_A : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(snake_case_,snake_case_ )
return flax_state_dict
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
def is_key_or_prefix_key_in_dict(snake_case_ ) -> bool:
return len(set(snake_case_ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_A : int = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_A : Optional[int] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_A : Union[str, Any] = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case_ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_A : str = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case_ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_A : str = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case_ ):
_A : Optional[Any] = pt_tensor.transpose(2,3,1,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_A : Optional[int] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case_ ):
_A : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_A : Tuple = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_A : str = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_A : Tuple = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_A : Optional[int] = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_A : Optional[Any] = pt_tuple_key[-2] + """_v"""
if name is not None:
_A : List[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# convert pytorch tensor to numpy
_A : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
_A : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_A : List[Any] = flax_model.params["""params"""]
else:
_A : Dict = flax_model.params
_A : Optional[int] = flatten_dict(snake_case_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_A : str = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(snake_case_ )
_A : Any = {}
_A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_A : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_A : Dict = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_A : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_A : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_A , _A : Dict = rename_key_and_reshape_tensor(
snake_case_,snake_case_,snake_case_,snake_case_ )
# add model prefix if necessary
_A : str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_A : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_A : Tuple = jnp.asarray(snake_case_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case_,snake_case_ )
continue
# also add unexpected weight so that warning is thrown
_A : Any = jnp.asarray(snake_case_ )
else:
# also add unexpected weight so that warning is thrown
_A : List[Any] = jnp.asarray(snake_case_ )
return unflatten_dict(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
import torch
# Load the index
_A : List[Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
_A : Union[str, Any] = torch.load(snake_case_ )
_A : List[str] = {k: v.numpy() for k, v in pt_state_dict.items()}
_A : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_A : int = flax_model.params["""params"""]
_A : Dict = flatten_dict(snake_case_ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
_A : int = flax_model.params
_A : Optional[Any] = flatten_dict(snake_case_ )
_A : Union[str, Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_A : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_A : Any = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_A : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_A : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_A , _A : Union[str, Any] = rename_key_and_reshape_tensor(
snake_case_,snake_case_,snake_case_,snake_case_ )
# add model prefix if necessary
_A : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_A : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_A : Optional[Any] = jnp.asarray(snake_case_ )
continue
if "var" in flax_key[-1]:
_A : Any = jnp.asarray(snake_case_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case_,snake_case_ )
continue
# also add unexpected weight so that warning is thrown
_A : List[str] = jnp.asarray(snake_case_ )
else:
# also add unexpected weight so that warning is thrown
_A : Dict = jnp.asarray(snake_case_ )
return unflatten_dict(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = os.path.abspath(snake_case_ )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
_A : Optional[Any] = getattr(snake_case_,"""Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(snake_case_,"""rb""" ) as state_f:
try:
_A : Dict = from_bytes(snake_case_,state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state )
_A : Dict = flatten_dict(snake_case_ )
_A : str = pt_model.state_dict()
_A : List[str] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
_A : List[str] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_A : List[Any] = []
_A : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_A : Dict = flax_key_tuple[0] == pt_model.base_model_prefix
_A : List[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_A : List[Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_A : List[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case_ ) not in pt_model_dict:
# conv layer
_A : str = flax_key_tuple[:-1] + ("""weight""",)
_A : List[str] = jnp.transpose(snake_case_,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case_ ) not in pt_model_dict:
# linear layer
_A : List[str] = flax_key_tuple[:-1] + ("""weight""",)
_A : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_A : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_A : Optional[Any] = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
_A : List[Any] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
_A : Optional[int] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_A : List[Any] = """.""".join(snake_case_ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_A : Optional[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_A : Optional[Any] = key.split(""".""" )
_A : List[str] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_A : str = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
_A : Any = key_components[-2] + """_v"""
if name is not None:
_A : int = key_components[:-3] + [name]
_A : List[str] = """.""".join(snake_case_ )
_A : int = key
if flax_key in special_pt_names:
_A : List[str] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
_A : Union[str, Any] = np.asarray(snake_case_ ) if not isinstance(snake_case_,np.ndarray ) else flax_tensor
_A : int = torch.from_numpy(snake_case_ )
# remove from missing keys
missing_keys.remove(snake_case_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case_ )
pt_model.load_state_dict(snake_case_ )
# re-transform missing_keys to list
_A : Union[str, Any] = list(snake_case_ )
if len(snake_case_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(snake_case_ ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"""If your task is similar to the task the model of the checkpoint was trained on, """
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
        # Inputs without a mask token are not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        _A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : _a["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # unique predictions than there are distinct targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 1
|
from manim import *
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Tuple:
_A : Any = Rectangle(height=0.5 , width=0.5 )
_A : str = Rectangle(height=0.25 , width=0.25 )
_A : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A : Union[str, Any] = [mem.copy() for i in range(6 )]
_A : Tuple = [mem.copy() for i in range(6 )]
_A : Dict = VGroup(*_a ).arrange(_a , buff=0 )
_A : List[Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Optional[int] = VGroup(_a , _a ).arrange(_a , buff=0 )
_A : Dict = Text("""CPU""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_A : List[Any] = [mem.copy() for i in range(4 )]
_A : List[Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Optional[Any] = Text("""GPU""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_A : List[Any] = [mem.copy() for i in range(6 )]
_A : Optional[int] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Tuple = Text("""Model""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_A : Optional[int] = []
_A : Tuple = []
_A : List[Any] = []
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_A : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_A : List[Any] = [mem.copy() for i in range(6 )]
_A : Any = VGroup(*_a ).arrange(_a , buff=0 )
_A : Any = Text("""Loaded Checkpoint""" , font_size=24 )
_A : str = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_A : int = []
_A : List[str] = []
for i, rect in enumerate(_a ):
_A : Optional[Any] = fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_A : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_A : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A : Optional[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_A : Optional[Any] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_A : Optional[int] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_A : List[Any] = [meta_mem.copy() for i in range(6 )]
_A : List[str] = [meta_mem.copy() for i in range(6 )]
_A : Union[str, Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Tuple = VGroup(*_a ).arrange(_a , buff=0 )
_A : Any = VGroup(_a , _a ).arrange(_a , buff=0 )
_A : Tuple = Text("""Disk""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_A : Optional[int] = []
for i, rect in enumerate(_a ):
_A : Optional[int] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_A : List[Any] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait()
| 54
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
        # Test that the offsets are correctly adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , **_a ) -> None:
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
| 54
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
        _A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : snake_case_.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> str:
_A : Optional[int] = parent
_A : List[Any] = batch_size
_A : List[str] = image_size
_A : Optional[int] = num_channels
_A : Optional[Any] = num_stages
_A : Optional[int] = hidden_sizes
_A : Optional[Any] = depths
_A : int = is_training
_A : Any = use_labels
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : Union[str, Any] = num_labels
_A : Dict = initializer_range
_A : Dict = out_features
_A : List[str] = out_indices
_A : int = scope
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Optional[Any] = None
if self.use_labels:
_A : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Dict:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> Any:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : List[str] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : Dict = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Dict = None
_A : Dict = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> Any:
_A : List[str] = self.prepare_config_and_inputs()
_A , _A , _A : Tuple = config_and_inputs
_A : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : Tuple = ConvNextModelTester(self )
_A : Optional[int] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Any:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[str]:
pass
def a__ ( self ) -> Optional[int]:
_A , _A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Any = model_class(_a )
_A : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Optional[int] = [*signature.parameters.keys()]
_A : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> int:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
_A : Any = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> List[str]:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> List[str]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> Dict:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> List[str]:
_A : List[Any] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : Optional[Any] = self.default_image_processor
_A : Union[str, Any] = prepare_img()
_A : int = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Tuple = model(**_a )
# verify the logits
_A : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : str = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = ConvNextModelTester(self )
| 54
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def lowerCAmelCase_ ( snake_case_,snake_case_=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_A : List[str] = n - 1
_A : Dict = 0
while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp), with d odd
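    # Example: for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and exp = 2;
    # a base a with a**55 % 221 != 1 whose repeated squarings never reach 220
    # witnesses that 221 = 13 * 17 is composite.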
_A : int = 0
while count < prec:
_A : Any = random.randint(2,n - 1 )
_A : Any = bin_exp_mod(snake_case_,snake_case_,snake_case_ )
if b != 1:
_A : List[Any] = True
for _ in range(snake_case_ ):
if b == n - 1:
_A : List[str] = False
break
_A : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
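    # Build a 2-D list of random floats with shape (shape[0], shape[1]),
    # scaled by `scale`, using the given RNG or falling back to the global one.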
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 1
|
class lowercase :
def __init__( self , _a ) -> List[str]:
_A : Any = val
_A : int = None
_A : Union[str, Any] = None
def a__ ( self , _a ) -> Optional[int]:
if self.val:
if val < self.val:
if self.left is None:
_A : Optional[Any] = Node(_a )
else:
self.left.insert(_a )
elif val > self.val:
if self.right is None:
_A : Dict = Node(_a )
else:
self.right.insert(_a )
else:
_A : str = val
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Recursive traversal
if root:
inorder(root.left,snake_case_ )
res.append(root.val )
inorder(root.right,snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
# Build BST
if len(snake_case_ ) == 0:
return arr
_A : Optional[int] = Node(arr[0] )
for i in range(1,len(snake_case_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
_A : List[Any] = []
inorder(snake_case_,snake_case_ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 54
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
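    # S-box lookup for simplified DES: the two outer bits of the 4-bit block
    # select the row and the two middle bits select the column of the S-box.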
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
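    # One Feistel round of simplified DES: expand/permute the right half, XOR
    # it with the round key, substitute through the two S-boxes, apply P4 and
    # XOR the result into the left half.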
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 1
|
import csv
import tweepy
# Twitter API credentials
_snake_case = ""
_snake_case = ""
_snake_case = ""
_snake_case = ""
def lowerCAmelCase_ ( snake_case_ ):
# authorize twitter, initialize tweepy
_A : str = tweepy.OAuthHandler(snake_case_,snake_case_ )
auth.set_access_token(snake_case_,snake_case_ )
_A : int = tweepy.API(snake_case_ )
# initialize a list to hold all the tweepy Tweets
_A : Union[str, Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_A : List[str] = api.user_timeline(screen_name=snake_case_,count=200 )
# save most recent tweets
alltweets.extend(snake_case_ )
# save the id of the oldest tweet less one
_A : List[str] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(snake_case_ ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_A : int = api.user_timeline(
screen_name=snake_case_,count=200,max_id=snake_case_ )
# save most recent tweets
alltweets.extend(snake_case_ )
# update the id of the oldest tweet less one
_A : Union[str, Any] = alltweets[-1].id - 1
print(f'''...{len(snake_case_ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_A : List[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''',"""w""" ) as f:
_A : List[Any] = csv.writer(snake_case_ )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(snake_case_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 54
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
| 54
| 1
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector ):
    return np.dot(vector , vector )
class lowercase :
    def __init__( self , *, regularization = np.inf , kernel = "linear" , gamma = 0.0 , ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("""rbf kernel requires gamma""" )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError("""gamma must be float or int""" )
            if not self.gamma > 0:
                raise ValueError("""gamma must be > 0""" )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: default gamma = 1 / (n_features * X.var()) (see wiki)
            # previously it was 1 / n_features
        else:
            msg = F'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear( self , vector_a , vector_b ) -> float:
        return np.dot(vector_a , vector_b )
    def __rbf( self , vector_a , vector_b ) -> float:
        return np.exp(-(self.gamma * norm_squared(vector_a - vector_b )) )
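    # Note (added for clarity): __rbf computes exp(-gamma * ||a - b||^2), the
    # Gaussian radial basis function; larger gamma gives a narrower kernel.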
    def fit( self , observations , classes ) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes )

        def to_minimize(candidate ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )

        ly_constraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_constraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
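# A minimal smoke test for the classifier above. This is a hedged sketch: the
# toy data and expected outputs are illustrative assumptions, and `lowercase`
# is simply the (obfuscated) class name used in this file.
if __name__ == "__main__":
    toy_xs = np.array([[1.0, 0.0], [2.0, 1.0], [0.0, 1.0], [1.0, 2.0]])
    toy_ys = np.array([1, 1, -1, -1])
    svc = lowercase(kernel="linear")
    svc.fit(toy_xs, toy_ys)  # solves the Wolfe dual with scipy.optimize.minimize
    print(svc.predict(np.array([2.0, 0.0])))  # expected: 1
    print(svc.predict(np.array([0.0, 2.0])))  # expected: -1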
| 54
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding.""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                F''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
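# A hedged sketch of what the "linear" entry in arg_to_scheduler resolves to at
# runtime. The stand-in module, learning rate, and step counts below are
# illustrative assumptions, not values used by this trainer.
def _linear_schedule_demo() -> None:
    model = nn.Linear(4 , 4 )  # stand-in module
    optimizer = torch.optim.AdamW(model.parameters() , lr=1e-4 )
    scheduler = get_linear_schedule_with_warmup(
        optimizer , num_warmup_steps=100 , num_training_steps=1000 )
    for _ in range(1000 ):  # one scheduler step per optimizer step
        optimizer.step()
        scheduler.step()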
| 54
| 1
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
    def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        token = """</s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """</s>""" )
        self.assertEqual(vocab_keys[-1] , """v""" )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
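        # Why 105 above: raw SentencePiece ids are shifted by `offset` (103) to
        # make room for <pad>, </s>, the two mask tokens and the <unk_N>
        # placeholders, so SentencePiece's unk id 2 surfaces as 2 + 103 == 105.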
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
    def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ["""This is going to be way too long.""" * 1000, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        test_string = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(test_string ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations ):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x , y ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''' )
    print(f'''The numpy value of pi is {pi}''' )
    print(f'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations , min_value = 0.0 , max_value = 1.0 ):
    def identity_function(x ) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("""******************""" )
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
    print(f'''Estimated value is {estimated_value}''' )
    print(f'''Expected value is {expected_value}''' )
    print(f'''Total error is {abs(estimated_value - expected_value )}''' )
    print("""******************""" )
def pi_estimator_using_area_under_curve(iterations ):
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )

    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("""******************""" )
    print("""Estimating pi using area_under_curve_estimator""" )
    print(f'''Estimated value is {estimated_value}''' )
    print(f'''Expected value is {pi}''' )
    print(f'''Total error is {abs(estimated_value - pi )}''' )
    print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
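# A hedged sketch of the mean-value identity behind area_under_curve_estimator:
# integral_a^b f(x) dx  ~=  (b - a) * mean(f(U)) with U ~ Uniform(a, b). The
# sin example and sample size are illustrative, not part of the original module.
def _mean_value_demo(samples: int = 100_000 ) -> None:
    from math import sin

    estimate = (pi - 0.0) * mean(sin(uniform(0.0 , pi ) ) for _ in range(samples ) )
    print(f'''Estimated integral of sin on [0, pi]: {estimate} (exact: 2.0)''' )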
| 54
| 1
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 54
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest( TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        save_dir = os.path.join(self.tmpdirname , """rag_tokenizer""" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 54
| 1
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_snake_case = logging.getLogger(__name__)
_snake_case = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""",type=snake_case_,default="""roberta-base""",help="""The model config to use. Note that we don't copy the model's weights, only the config!""",)
parser.add_argument(
"""--tokenizer""",type=snake_case_,default="""unigram-tokenizer-wikitext""",help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""",)
parser.add_argument(
"""--per_replica_batch_size""",type=snake_case_,default=8,help="""Batch size per TPU core.""",)
parser.add_argument(
"""--no_tpu""",action="""store_true""",help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""",)
parser.add_argument(
"""--tpu_name""",type=snake_case_,help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""",default="""local""",)
parser.add_argument(
"""--tpu_zone""",type=snake_case_,help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""",)
parser.add_argument(
"""--gcp_project""",type=snake_case_,help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""",action="""store_true""",help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""",)
parser.add_argument(
"""--train_dataset""",type=snake_case_,help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""",)
parser.add_argument(
"""--shuffle_buffer_size""",type=snake_case_,default=2**18,help="""Size of the shuffle buffer (in samples)""",)
parser.add_argument(
"""--eval_dataset""",type=snake_case_,help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""",)
parser.add_argument(
"""--num_epochs""",type=snake_case_,default=1,help="""Number of epochs to train for.""",)
parser.add_argument(
"""--learning_rate""",type=snake_case_,default=1e-4,help="""Learning rate to use for training.""",)
parser.add_argument(
"""--weight_decay_rate""",type=snake_case_,default=1e-3,help="""Weight decay rate to use for training.""",)
parser.add_argument(
"""--max_length""",type=snake_case_,default=512,help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""",)
parser.add_argument(
"""--mlm_probability""",type=snake_case_,default=0.15,help="""Fraction of tokens to mask during training.""",)
parser.add_argument("""--output_dir""",type=snake_case_,required=snake_case_,help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""",type=snake_case_,help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
    return args
def initialize_tpu(args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
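# Background note: on Colab-hosted TPUs the resolver needs no arguments, while
# GCE TPU nodes are identified via --tpu_name/--tpu_zone/--gcp_project and TPU
# VMs use --tpu_name local (matching the argparse help strings above).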
def count_samples(file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""" )[-1]
        sample_count = re.search(r"""-\d+-(\d+)\.tfrecord""" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
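# Naming assumption documented here for clarity: count_samples expects shard
# files named like "dataset-00042-12345.tfrecord", where the trailing integer
# is that shard's sample count (the convention produced by
# prepare_tfrecord_shards.py, per the --max_length help text above).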
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(batch_size )
    return dataset
def main(args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["""accuracy"""] )

    def decode_fn(example ):
        features = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="""tf""" )

    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["""attention_mask"""] , tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""] , batch["""labels"""] = data_collator.tf_mask_tokens(
            batch["""input_ids"""] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
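# Example invocation (hedged: the script filename, bucket paths, and model ids
# below are placeholders, not values taken from this repository):
#
#   python run_mlm.py \
#     --tokenizer unigram-tokenizer-wikitext \
#     --pretrained_model_config roberta-base \
#     --train_dataset gs://my-bucket/train \
#     --eval_dataset gs://my-bucket/eval \
#     --tpu_name local --bfloat16 \
#     --output_dir ./mlm-checkpoints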
| 54
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester( unittest.TestCase ):
    def setUp( self ):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
    def tearDown( self ):
        check_copies.TRANSFORMER_PATH = """src/transformers"""
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , """new_code.py""" )
        with open(fname , """w""" , newline="""\n""" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , """r""" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_transformers( self ):
        code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
    def test_convert_to_localized_md( self ):
        localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["""format_model_list"""] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme["""format_model_list"""] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme["""format_model_list"""] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
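    # For reference, a hedged sketch of the marker this test suite exercises
    # (the example class name is hypothetical):
    #
    #   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MyModel
    #   class MyModelLMPredictionHead(nn.Module):
    #       ...  # body must stay identical to the reference, modulo the declared rename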
| 54
| 1
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        download_parser = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=_a , default=_a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=_a , help="""Name of the model to download""" )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , """spectrogram_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """feature_size""" ) )
        self.assertTrue(hasattr(feature_extractor , """num_audio_channels""" ) )
        self.assertTrue(hasattr(feature_extractor , """hop_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """chunk_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """sampling_rate""" ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""" )
        mel_second = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""" )
        mel_second = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
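    # Layout note (inferred from the assertions above, stated here as an
    # assumption): TVLT audio features come out shaped
    # (batch, num_audio_channels, time_frames, feature_size), e.g. (1, 1, 192, 128)
    # in the integration test below.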
    def _load_datasamples( self , num_samples ):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 54
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    model_type = "bridgetower_vision_model"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
def a__ ( cls , _a , **_a ) -> "PretrainedConfig":
_A , _A : List[Any] = cls.get_config_dict(_a , **_a )
if config_dict.get("""model_type""" ) == "bridgetower":
_A : List[str] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class BridgeTowerTextConfig( PretrainedConfig ):
    model_type = "bridgetower_text_model"
def __init__( self , _a=5_0265 , _a=768 , _a=12 , _a=12 , _a=1 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=514 , _a=1 , _a=1e-05 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , **_a , ) -> Optional[Any]:
super().__init__(**_a )
_A : List[str] = vocab_size
_A : Optional[int] = hidden_size
_A : Union[str, Any] = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Optional[Any] = hidden_act
_A : List[str] = initializer_factor
_A : Any = intermediate_size
_A : List[str] = hidden_dropout_prob
_A : Tuple = attention_probs_dropout_prob
_A : List[Any] = max_position_embeddings
_A : Optional[int] = type_vocab_size
_A : Optional[Any] = layer_norm_eps
_A : List[Any] = position_embedding_type
_A : List[str] = use_cache
_A : Tuple = pad_token_id
_A : List[Any] = bos_token_id
_A : Dict = eos_token_id
@classmethod
def a__ ( cls , _a , **_a ) -> "PretrainedConfig":
_A , _A : Tuple = cls.get_config_dict(_a , **_a )
if config_dict.get("""model_type""" ) == "bridgetower":
_A : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class lowercase ( UpperCamelCase__ ):
_a = "bridgetower"
def __init__( self , _a=True , _a="gelu" , _a=768 , _a=1 , _a=1e-05 , _a=False , _a="add" , _a=12 , _a=6 , _a=False , _a=False , _a=None , _a=None , **_a , ) -> List[Any]:
# TODO: remove this once the Hub files are updated.
_A : Optional[Any] = kwargs.pop("""text_config_dict""" , _a )
_A : Optional[Any] = kwargs.pop("""vision_config_dict""" , _a )
super().__init__(**_a )
_A : Dict = share_cross_modal_transformer_layers
_A : List[Any] = hidden_act
_A : List[Any] = hidden_size
_A : Union[str, Any] = initializer_factor
_A : str = layer_norm_eps
_A : Dict = share_link_tower_layers
_A : Optional[Any] = link_tower_type
_A : List[str] = num_attention_heads
_A : List[Any] = num_hidden_layers
_A : Union[str, Any] = tie_word_embeddings
_A : Union[str, Any] = init_layernorm_from_vision_encoder
if text_config is None:
_A : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_A : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_A : Optional[Any] = BridgeTowerTextConfig(**_a )
_A : str = BridgeTowerVisionConfig(**_a )
@classmethod
def a__ ( cls , _a , _a , **_a ) -> Tuple:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = copy.deepcopy(self.__dict__ )
_A : List[Any] = self.text_config.to_dict()
_A : int = self.vision_config.to_dict()
_A : int = self.__class__.model_type
return output
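# Hedged usage sketch, not part of the original file: per the model_type
# strings above these are the public BridgeTowerTextConfig,
# BridgeTowerVisionConfig and BridgeTowerConfig, and the last classmethod is
# publicly named `from_text_vision_configs`.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
text_config = BridgeTowerTextConfig(vocab_size=50265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, num_hidden_layers=12)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
assert config.text_config.vocab_size == 50265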
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 1
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
_snake_case = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
_snake_case = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
_snake_case = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
for tf_name, hf_name in patterns:
_A : Optional[Any] = k.replace(snake_case_,snake_case_ )
return k
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = BigBirdPegasusConfig(**snake_case_ )
_A : Tuple = BigBirdPegasusForConditionalGeneration(snake_case_ )
_A : Union[str, Any] = torch_model.state_dict()
_A : List[str] = {}
# separating decoder weights
_A : Any = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
_A : Union[str, Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items(),"""tf -> hf conversion""" ):
_A : Tuple = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
_A : Optional[int] = DECODER_PATTERNS
_A : List[Any] = rename_state_dict_key(snake_case_,snake_case_ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
_A : List[Any] = v.T
_A : Dict = torch.from_numpy(snake_case_ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(),"""tf -> hf conversion""" ):
_A : Dict = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
_A : Any = REMAINING_PATTERNS
_A : List[Any] = rename_state_dict_key(snake_case_,snake_case_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
_A : List[Any] = v.T
_A : Tuple = torch.from_numpy(snake_case_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
_A : List[Any] = mapping["""model.embed_positions.weight"""]
_A : List[str] = mapping.pop("""model.embed_positions.weight""" )
_A , _A : Dict = torch_model.load_state_dict(snake_case_,strict=snake_case_ )
_A : List[str] = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = tf.train.list_variables(snake_case_ )
_A : Any = {}
_A : Optional[Any] = ["""global_step"""]
for name, shape in tqdm(snake_case_,desc="""converting tf checkpoint to dict""" ):
_A : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_A : List[str] = tf.train.load_variable(snake_case_,snake_case_ )
_A : Optional[int] = array
return tf_weights
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : int = get_tf_weights_as_numpy(snake_case_ )
_A : Optional[Any] = convert_bigbird_pegasus(snake_case_,snake_case_ )
torch_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
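# Worked example of the pattern-rewrite step implemented by
# rename_state_dict_key above, using four of the INIT_COMMON pairs on an
# illustrative TF checkpoint key:
example_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
example_key = "pegasus/decoder/layer_0/self_attn/query/kernel"
for tf_name, hf_name in example_patterns:
    example_key = example_key.replace(tf_name, hf_name)
assert example_key == "model.decoder.layers.0.self_attn.query.weight"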
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 1
|
_snake_case = 8.3_1_4_4_5_9_8
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 28
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 54
|
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
            _A : Optional[int] = first_term + n // first_term
            if common_difference % 4: # common_difference equals 4d, so it must be divisible by 4
                continue
            else:
                common_difference //= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # z > 0 requires a > d, and n > 0 requires a < 4d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
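# A de-obfuscated sketch of the counting idea above (Project Euler 135),
# assuming the progression x = a + d, y = a, z = a - d, which gives
# x**2 - y**2 - z**2 = a * (4d - a) = n and hence 4d = a + n / a.
# Names are illustrative; count_solutions(limit) mirrors solution() above.
def count_solutions(limit: int = 1000000, target: int = 10) -> int:
    frequency = [0] * limit
    for a in range(1, limit):
        for n in range(a, limit, a):  # n must be a multiple of a
            four_d = a + n // a
            if four_d % 4:  # d has to be an integer
                continue
            d = four_d // 4
            if d < a < 4 * d:  # z > 0 and n > 0
                frequency[n] += 1
    return sum(1 for count in frequency[1:] if count == target)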
| 54
| 1
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowercase ( UpperCamelCase__ ):
_a = "data2vec-audio"
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.02 , _a=1e-5 , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=16 , _a=19 , _a=5 , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="sum" , _a=False , _a=False , _a=256 , _a=(512, 512, 512, 512, 1500) , _a=(5, 3, 3, 1, 1) , _a=(1, 2, 3, 1, 1) , _a=512 , _a=0 , _a=1 , _a=2 , _a=False , _a=3 , _a=2 , _a=3 , _a=None , **_a , ) -> Union[str, Any]:
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
_A : Any = hidden_size
_A : List[str] = feat_extract_activation
_A : Optional[int] = list(_a )
_A : Tuple = list(_a )
_A : Union[str, Any] = list(_a )
_A : Tuple = conv_bias
_A : Optional[int] = num_conv_pos_embeddings
_A : Tuple = num_conv_pos_embedding_groups
_A : Optional[int] = conv_pos_kernel_size
_A : List[str] = len(self.conv_dim )
_A : str = num_hidden_layers
_A : List[str] = intermediate_size
_A : Any = hidden_act
_A : Optional[Any] = num_attention_heads
_A : Optional[Any] = hidden_dropout
_A : Union[str, Any] = attention_dropout
_A : Optional[Any] = activation_dropout
_A : List[Any] = feat_proj_dropout
_A : int = final_dropout
_A : List[Any] = layerdrop
_A : Optional[int] = layer_norm_eps
_A : Tuple = initializer_range
_A : str = vocab_size
_A : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_A : int = mask_time_prob
_A : Union[str, Any] = mask_time_length
_A : Dict = mask_time_min_masks
_A : List[str] = mask_feature_prob
_A : Optional[Any] = mask_feature_length
_A : List[Any] = mask_feature_min_masks
# ctc loss
_A : Any = ctc_loss_reduction
_A : str = ctc_zero_infinity
# adapter
_A : List[Any] = add_adapter
_A : Any = adapter_kernel_size
_A : List[Any] = adapter_stride
_A : Optional[Any] = num_adapter_layers
_A : Union[str, Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_A : Union[str, Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_A : Optional[int] = list(_a )
_A : Dict = list(_a )
_A : Any = list(_a )
_A : int = xvector_output_dim
@property
def a__ ( self ) -> List[str]:
return math.prod(self.conv_stride )
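# Hedged usage sketch: per the model_type above this is Data2VecAudioConfig,
# and the property just defined is publicly `inputs_to_logits_ratio` -- the
# total stride of the convolutional feature extractor:
from transformers import Data2VecAudioConfig
config = Data2VecAudioConfig()
assert config.inputs_to_logits_ratio == 320  # 5 * 2 * 2 * 2 * 2 * 2 * 2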
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "Pix2StructImageProcessor"
_a = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , _a , _a ) -> Dict:
_A : Dict = False
super().__init__(_a , _a )
def __call__( self , _a=None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 2048 , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
_A : List[str] = self.tokenizer
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_A : Dict = self.image_processor(
_a , return_tensors=_a , max_patches=_a , **_a )
else:
# add pixel_values and bbox
_A : Dict = self.image_processor(
_a , return_tensors=_a , max_patches=_a , header_text=_a , **_a )
if text is not None and not self.image_processor.is_vqa:
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
if "attention_mask" in text_encoding:
_A : List[str] = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
_A : str = text_encoding.pop("""input_ids""" )
else:
_A : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> Union[str, Any]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.tokenizer.model_input_names
_A : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
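# Hedged usage sketch (the public name Pix2StructProcessor and the checkpoint
# below are assumptions inferred from the classes this processor wires up):
from PIL import Image
from transformers import Pix2StructProcessor
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
inputs = processor(images=Image.new("RGB", (256, 256)), return_tensors="pt", max_patches=1024)
# for non-VQA checkpoints, text passed on its own is tokenized directly:
labels = processor(text="a caption", return_tensors="pt").input_ids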
| 54
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( UpperCamelCase__ ):
_a = 42
_a = 42
def __init__( self , _a , _a ) -> int:
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = 1 , _a = 50 , _a = None , _a = "pil" , _a = True , **_a , ) -> Union[Tuple, ImagePipelineOutput]:
_A : int = self.unet.config.sample_size
_A : Any = (batch_size, 3, img_size, img_size)
_A : Optional[Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_A : int = randn_tensor(_a , generator=_a , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_A : int = self.scheduler.schedule[t]
_A : int = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_A , _A : Any = self.scheduler.add_noise_to_input(_a , _a , generator=_a )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_A : Tuple = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_A : Tuple = self.scheduler.step(_a , _a , _a , _a )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_A : Dict = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_A : str = self.scheduler.step_correct(
_a , _a , _a , _a , step_output.prev_sample , step_output["""derivative"""] , )
_A : str = step_output.prev_sample
_A : Tuple = (sample / 2 + 0.5).clamp(0 , 1 )
_A : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A : int = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
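# Hedged usage sketch: this pipeline matches diffusers' KarrasVePipeline.  A
# tiny randomly initialised UNet2DModel is enough to exercise the sampling
# loop; every size below is illustrative and tied to no real checkpoint:
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=2, generator=torch.manual_seed(0)).images[0]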
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
_a = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_a )
_A : int = size if size is not None else {"""shortest_edge""": 384}
_A : str = get_size_dict(_a , default_to_square=_a )
_A : str = do_resize
_A : Optional[Any] = size
# Default value set here for backwards compatibility where the value in config is None
_A : List[str] = crop_pct if crop_pct is not None else 224 / 256
_A : List[Any] = resample
_A : Union[str, Any] = do_rescale
_A : Optional[int] = rescale_factor
_A : List[Any] = do_normalize
_A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self , _a , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_A : Dict = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
_A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_A : Any = int(shortest_edge / crop_pct )
_A : Union[str, Any] = get_resize_output_image_size(_a , size=_a , default_to_square=_a )
_A : Any = resize(image=_a , size=_a , resample=_a , data_format=_a , **_a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_a , size=(shortest_edge, shortest_edge) , data_format=_a , **_a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_a , size=(shortest_edge, shortest_edge) , resample=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a , ) -> Dict:
return rescale(_a , scale=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def a__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_A : Optional[int] = do_resize if do_resize is not None else self.do_resize
_A : str = crop_pct if crop_pct is not None else self.crop_pct
_A : Tuple = resample if resample is not None else self.resample
_A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_A : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_A : Tuple = image_mean if image_mean is not None else self.image_mean
_A : List[str] = image_std if image_std is not None else self.image_std
_A : Any = size if size is not None else self.size
_A : str = get_size_dict(_a , default_to_square=_a )
_A : List[str] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_A : List[Any] = [to_numpy_array(_a ) for image in images]
if do_resize:
_A : Dict = [self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images]
if do_rescale:
_A : Optional[int] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_A : Optional[int] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_A : Union[str, Any] = [to_channel_dimension_format(_a , _a ) for image in images]
_A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
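# Worked example of the resize rule above (pure arithmetic, no library
# calls): with a user-set size of {"shortest_edge": 224} and the default
# crop_pct = 224 / 256, the short side is first resized to
# int(224 / (224 / 256)) = 256 and then center-cropped back to 224; at
# shortest_edge >= 384 the image is instead warped directly, with no crop.
crop_pct = 224 / 256
assert int(224 / crop_pct) == 256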
| 54
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_snake_case = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase ( datasets.BuilderConfig ):
_a = 1_0_0_0_0
_a = None
_a = None
class lowercase ( datasets.ArrowBasedBuilder ):
_a = ParquetConfig
def a__ ( self ) -> Optional[int]:
return datasets.DatasetInfo(features=self.config.features )
def a__ ( self , _a ) -> Any:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_A : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_A : Optional[Any] = data_files
if isinstance(_a , _a ):
_A : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_A : Optional[Any] = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_A : List[str] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_A : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_A : Dict = [dl_manager.iter_files(_a ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_a ):
with open(_a , """rb""" ) as f:
_A : int = datasets.Features.from_arrow_schema(pq.read_schema(_a ) )
break
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={"""files""": files} ) )
return splits
def a__ ( self , _a ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_A : List[Any] = table_cast(_a , self.info.features.arrow_schema )
return pa_table
def a__ ( self , _a ) -> Tuple:
_A : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , """rb""" ) as f:
_A : Union[str, Any] = pq.ParquetFile(_a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_A : Optional[int] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(_a )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' )
raise
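# Hedged usage sketch: this builder is what `load_dataset("parquet", ...)`
# dispatches to, and the ParquetConfig fields above (batch_size, columns,
# features) arrive as keyword arguments.  The file pattern is illustrative:
from datasets import load_dataset
ds = load_dataset("parquet", data_files={"train": "path/to/*.parquet"}, columns=["col_1"])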
| 54
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
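# Hand-worked example of the procedure above, recomputed independently since
# the snippet's identifiers are name-mangled.  For two variables with
# minterms {0, 1, 3}: 00 and 01 merge into "0-" (a'), 01 and 11 merge into
# "-1" (b); "0-" alone covers minterm 0 and "-1" alone covers minterm 3, so
# both are essential prime implicants and f = a' + b.
def covers(implicant: str, minterm: str) -> bool:
    # a "-" position matches anything; fixed bits must agree exactly
    return all(i in ("-", m) for i, m in zip(implicant, minterm))
assert covers("0-", "00") and covers("0-", "01") and not covers("0-", "11")
assert covers("-1", "01") and covers("-1", "11") and not covers("-1", "00")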
| 54
| 1
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
| 54
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
    _A : Dict = [[]] # the empty string has exactly one (empty) combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
                    # add the word to every combination the current position holds
                    # now, push those combinations to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 1
|
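# Three ways to check whether a singly linked list is a palindrome.
# Variant 1 (O(n) time, O(1) extra space): locate the middle with fast/slow
# pointers, reverse the second half in place, then walk both halves comparing values.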
def lowerCAmelCase_ ( snake_case_ ):
if not head:
return True
# split the list to two parts
_A , _A : Union[str, Any] = head.next, head
while fast and fast.next:
_A : Union[str, Any] = fast.next.next
_A : List[Any] = slow.next
_A : Optional[Any] = slow.next
_A : Union[str, Any] = None # detach the first half here (the comparison still works even without this)
# reverse the second part
_A : List[str] = None
while second:
_A : str = second.next
_A : str = node
_A : Tuple = second
_A : List[str] = nxt
# compare two parts
# the second part has the same number of nodes, or one fewer
while node:
if node.val != head.val:
return False
_A : List[str] = node.next
_A : int = head.next
return True
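# Variant 2 (O(n) time, O(n) extra space): push the values of the second half
# onto a stack, then pop while walking from the head and comparing.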
def lowerCAmelCase_ ( snake_case_ ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_A : Tuple = head
while fast and fast.next:
_A , _A : Dict = fast.next.next, slow.next
# 2. Push the second half into the stack
_A : Any = [slow.val]
while slow.next:
_A : Dict = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_A : Optional[Any] = cur.next
return True
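# Variant 3 (O(n) time, O(n) extra space): record the positions at which each
# value occurs; in a palindrome every value's position list must mirror around
# the centre, with at most one value allowed an unpaired middle position.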
def lowerCAmelCase_ ( snake_case_ ):
if not head or not head.next:
return True
_A : int = {}
_A : Optional[int] = 0
while head:
if head.val in d:
d[head.val].append(snake_case_ )
else:
_A : List[Any] = [pos]
_A : int = head.next
pos += 1
_A : Tuple = pos - 1
_A : str = 0
for v in d.values():
if len(snake_case_ ) % 2 != 0:
middle += 1
else:
_A : Dict = 0
for i in range(0,len(snake_case_ ) ):
if v[i] + v[len(snake_case_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 54
|
import operator
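# Strand sort: repeatedly extract an increasing "strand" (sublist) from the
# input and merge it into the partially built solution, recursing until the
# input is exhausted.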
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 1
|
import math
def lowerCAmelCase_ ( snake_case_ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5,int(math.sqrt(snake_case_ ) + 1 ),6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
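# Sanity checks: is_prime(29) -> True, is_prime(49) -> False (49 == 7 * 7).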
def lowerCAmelCase_ ( snake_case_ = 10001 ):
try:
_A : Dict = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
if nth <= 0:
raise ValueError("""Parameter nth must be greater than or equal to one.""" )
_A : list[int] = []
_A : List[Any] = 2
while len(snake_case_ ) < nth:
if is_prime(snake_case_ ):
primes.append(snake_case_ )
num += 1
else:
num += 1
return primes[len(snake_case_ ) - 1]
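# solution(6) returns 13 (the sixth prime); the default nth=10001 answers
# Project Euler problem 7.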
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, i.e. that the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# Input without a mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda x : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# than the number of unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
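# Lazy import structure: the heavy submodules below are only imported on first
# attribute access (via _LazyModule at the bottom of the file), except under
# TYPE_CHECKING, where everything is imported eagerly for static type checkers.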
_snake_case = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CanineTokenizer
_a = False
def a__ ( self ) -> str:
super().setUp()
_A : Union[str, Any] = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> List[str]:
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def a__ ( self , **_a ) -> CanineTokenizer:
_A : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
_A : int = 1024
return tokenizer
@require_torch
def a__ ( self ) -> int:
_A : Union[str, Any] = self.canine_tokenizer
_A : Optional[int] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
_A : Optional[int] = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
_A : Union[str, Any] = tokenizer(_a , padding=_a , return_tensors="""pt""" )
self.assertIsInstance(_a , _a )
_A : str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_a , _a )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def a__ ( self ) -> Dict:
_A : List[Any] = self.canine_tokenizer
_A : Dict = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
_A : Any = tokenizer(_a , padding=_a , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , _a )
self.assertIn("""attention_mask""" , _a )
self.assertIn("""token_type_ids""" , _a )
@require_torch
def a__ ( self ) -> List[Any]:
_A : Tuple = self.canine_tokenizer
_A : int = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
_A : str = tokenizer(
text_target=_a , max_length=32 , padding="""max_length""" , truncation=_a , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def a__ ( self ) -> int:
# safety check on max_len default value so we are sure the test works
_A : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_A : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A : str = tempfile.mkdtemp()
_A : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
_A : Union[str, Any] = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
_A : str = tokenizer.__class__.from_pretrained(_a )
_A : Dict = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
shutil.rmtree(_a )
_A : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A : Optional[int] = tempfile.mkdtemp()
_A : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
_A : List[Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_A : int = chr(0xe0_07 )
additional_special_tokens.append(_a )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_A : int = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
_A : List[str] = tokenizer.__class__.from_pretrained(_a )
_A : Dict = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
self.assertIn(_a , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_A : Optional[Any] = tokenizer.__class__.from_pretrained(_a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A , _A : int = self.get_clean_sequence(_a )
# a special token for Canine can be defined as follows:
_A : Tuple = 0xe0_05
_A : str = chr(_a )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_A : str = tokenizer.encode(_a , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_A : List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_a )
_A : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_A : str = tokenizer.encode(_a , add_special_tokens=_a )
_A : str = tokenizer.encode(_a , add_special_tokens=_a )
self.assertEqual(_a , input_encoded + special_token_id )
_A : str = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def a__ ( self ) -> str:
_A : str = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A : Tuple = chr(0xe0_05 )
_A : int = chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_a )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
_A : Union[str, Any] = tokenizer.tokenize(_a )
_A : List[Any] = tokenizer.tokenize(_a )
self.assertEqual(len(_a ) , 1 )
self.assertEqual(len(_a ) , 1 )
self.assertEqual(token_a[0] , _a )
self.assertEqual(token_a[0] , _a )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
_A : Optional[Any] = 0xe0_06
_A : str = chr(_a )
_A : Any = AddedToken(_a , lstrip=_a )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_a )
tokenizer.from_pretrained(_a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
with open(os.path.join(_a , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_A : Union[str, Any] = json.load(_a )
with open(os.path.join(_a , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_A : int = json.load(_a )
# a special token for Canine can be defined as follows:
_A : List[Any] = 0xe0_06
_A : Optional[int] = chr(_a )
_A : str = [new_token_a]
_A : str = [new_token_a]
with open(os.path.join(_a , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_a , _a )
with open(os.path.join(_a , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_a , _a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_A : Optional[Any] = tokenizer_class.from_pretrained(_a , extra_ids=0 )
self.assertIn(_a , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_A : Dict = 0xe0_07
_A : Optional[Any] = chr(_a )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_A : int = [AddedToken(_a , lstrip=_a )]
_A : List[str] = tokenizer_class.from_pretrained(
_a , additional_special_tokens=_a , extra_ids=0 )
self.assertIn(_a , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def a__ ( self ) -> int:
_A : Optional[Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A : Tuple = """hello world"""
if self.space_between_special_tokens:
_A : Tuple = """[CLS] hello world [SEP]"""
else:
_A : int = input
_A : Optional[int] = tokenizer.encode(_a , add_special_tokens=_a )
_A : Dict = tokenizer.decode(_a , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_a , [output, output.lower()] )
def a__ ( self ) -> Optional[int]:
_A : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A : Any = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_A : int = """a"""
_A : Optional[int] = ord(_a )
for attr in attributes_list:
setattr(_a , attr + """_id""" , _a )
self.assertEqual(getattr(_a , _a ) , _a )
self.assertEqual(getattr(_a , attr + """_id""" ) , _a )
setattr(_a , attr + """_id""" , _a )
self.assertEqual(getattr(_a , _a ) , _a )
self.assertEqual(getattr(_a , attr + """_id""" ) , _a )
setattr(_a , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_a , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_a , """additional_special_tokens_ids""" ) , [] )
_A : Optional[Any] = 0xe0_06
_A : Dict = chr(_a )
setattr(_a , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_a , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_a , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> int:
pass
def a__ ( self ) -> int:
pass
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> str:
pass
| 54
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
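# Open issues carrying any of these labels are exempt from being auto-closed
# or marked as stale by this script.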
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda i : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because the empty string has exactly one (empty) segmentation
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 1
|
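# Returns the 1-indexed position of the most significant set bit of a
# non-negative int (its bit length); 0 is returned for input 0. A negative
# input would loop forever, since Python's arithmetic right shift preserves
# the sign (-1 >> 1 == -1). Note that doctest.testmod() below has nothing to
# run, as the function carries no docstring.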
def lowerCAmelCase_ ( snake_case_ ):
if not isinstance(snake_case_,snake_case_ ):
raise TypeError("""Input value must be an 'int' type""" )
_A : Dict = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
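# The helper above builds pseudo-audio batches: shape[0] sequences of
# shape[1] random floats drawn from [0, scale).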
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all calls wrap encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 1
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = os.path.abspath(snake_case_ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_A : List[str] = tf.train.list_variables(snake_case_ )
_A : List[Any] = []
_A : Optional[Any] = []
_A : Union[str, Any] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_A : Tuple = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_A : int = name[1:]
# figure out how many levels deep the name is
_A : Any = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(snake_case_ )
# read data
_A : List[str] = tf.train.load_variable(snake_case_,snake_case_ )
names.append("""/""".join(snake_case_ ) )
arrays.append(snake_case_ )
logger.info(f'''Read a total of {len(snake_case_ ):,} layers''' )
# Sanity check
if len(set(snake_case_ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(snake_case_ ) )})''' )
_A : List[str] = list(set(snake_case_ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(snake_case_,snake_case_ ):
_A : Optional[Any] = full_name.split("""/""" )
_A : Any = model
_A : Any = []
for i, m_name in enumerate(snake_case_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
_A : Optional[Any] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
_A : int = getattr(snake_case_,"""embeddings""" )
_A : Dict = getattr(snake_case_,"""LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
_A : Any = getattr(snake_case_,"""encoder""" )
_A : Tuple = getattr(snake_case_,"""layer""" )
_A : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
_A : str = getattr(snake_case_,"""pooler""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
_A : List[str] = getattr(snake_case_,"""embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""token_type_embeddings""" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
_A : Any = getattr(snake_case_,"""weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
_A : Optional[int] = getattr(snake_case_,"""attention""" )
_A : List[Any] = getattr(snake_case_,"""self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
_A : Optional[int] = getattr(snake_case_,"""attention""" )
_A : Tuple = getattr(snake_case_,"""output""" )
_A : int = getattr(snake_case_,"""LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
_A : Optional[Any] = getattr(snake_case_,"""attention""" )
_A : Any = getattr(snake_case_,"""output""" )
_A : Optional[int] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
_A : Any = getattr(snake_case_,"""output""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_layer_norm":
                # output layer norm
trace.extend(["""output""", """LayerNorm"""] )
_A : str = getattr(snake_case_,"""output""" )
_A : Union[str, Any] = getattr(snake_case_,"""LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
_A : Optional[int] = getattr(snake_case_,"""key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
_A : Optional[int] = getattr(snake_case_,"""query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
_A : Optional[Any] = getattr(snake_case_,"""value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
_A : Dict = getattr(snake_case_,"""intermediate""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
_A : int = getattr(snake_case_,"""output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
_A : List[str] = getattr(snake_case_,"""bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
_A : Optional[Any] = getattr(snake_case_,"""weight""" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
_A : List[str] = """.""".join(snake_case_ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""",snake_case_ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""",snake_case_ ):
_A : str = array.reshape(pointer.data.shape )
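        # TF stores dense kernels as (in_features, out_features); PyTorch's
        # nn.Linear weight is (out_features, in_features), hence the transpose.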
if "kernel" in full_name:
_A : Tuple = array.transpose()
if pointer.shape == array.shape:
_A : Union[str, Any] = torch.from_numpy(snake_case_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path,config_path,pytorch_dump_path ):
    # Instantiate model
    logger.info(f'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
    load_tfa_weights_in_bert(model,tf_checkpoint_path,config )
    # Save pytorch-model
    logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
    torch.save(model.state_dict(),pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
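# Example invocation (script name and file paths are hypothetical):
#
#   python convert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint/bert_model.ckpt \
#       --bert_config_file ./tf2_checkpoint/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin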
| 54
|
def apply_table( inp,table ):
    # Permute a bit-string: entries of `table` are 1-indexed positions into `inp`.
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift( data ):
    # Circular left shift of a bit-string by one position.
    return data[1:] + data[0]
def xor( a,b ):
    # Bitwise XOR of two equal-length bit-strings.
    res = ""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox( s,data ):
    # The outer bits of a 4-bit block select the S-box row, the middle bits the column.
    row = int("0b" + data[0] + data[-1],2 )
    col = int("0b" + data[1:3],2 )
    return bin(s[row][col] )[2:]
def function( expansion,sa,sb,key,message ):
    # One Feistel round of simplified DES: expand the right half, mix in the
    # round key, substitute through the S-boxes, permute with P4, XOR into the left half.
    left = message[:4]
    right = message[4:]
    temp = apply_table(right,expansion )
    temp = xor(temp,key )
    l = apply_sbox(sa,temp[:4] )  # noqa: E741
    r = apply_sbox(sb,temp[4:] )
    l = "0" * (2 - len(l )) + l  # noqa: E741
    r = "0" * (2 - len(r )) + r
    temp = apply_table(l + r,p4_table )
    temp = xor(left,temp )
    return temp + right
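# Quick sanity checks for the helpers above (illustrative bit-strings, not
# official S-DES test vectors):
#
#   >>> left_shift("10100")
#   '01001'
#   >>> xor("1010", "0110")
#   '1100'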
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 1
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
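# To exercise this on a schedule (a sketch; the script path and scheduling
# mechanism are illustrative, e.g. a cron-triggered CI job):
#
#   GITHUB_TOKEN=<token-with-repo-scope> python scripts/stale.py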
| 54
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        self.size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = DPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def test_call_pil( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_numpy( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
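# A minimal usage sketch outside the test harness (assumes the public
# "Intel/dpt-large" checkpoint; any DPT checkpoint with a preprocessor config
# works the same way):
#
#   from PIL import Image
#   from transformers import DPTImageProcessor
#
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   pixel_values = processor(images=Image.open("photo.jpg"), return_tensors="pt").pixel_values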
| 54
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        # for a pair (A, B): zeros cover "[CLS] A [SEP]", ones cover "B [SEP]"
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
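# Usage sketch (the checkpoint name is one of the entries in the maps above):
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   ids = tok("hello world")["input_ids"]  # [CLS] hello world [SEP], as ids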
| 54
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
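# For example (step counts are illustrative), selecting "cosine" resolves to:
#
#   scheduler = arg_to_scheduler["cosine"](
#       optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )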
class Seq2SeqTrainer( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
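    # Note: the imported `label_smoothed_nll_loss` is assumed to have the
    # signature used in the legacy seq2seq examples (adapted from fairseq):
    #   label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100)
    #   -> (smoothed_loss, nll_loss)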
    def create_optimizer_and_scheduler( self , num_training_steps ) -> None:
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                F''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
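# Construction sketch for the trainer above (argument values are illustrative;
# `model`, `training_args`, `data_args`, and `train_dataset` are assumed to be
# built elsewhere with the usual HF training setup):
#
#   trainer = Seq2SeqTrainer(
#       config=model.config,
#       data_args=data_args,
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#   )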
| 54
| 1
|