| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 351 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
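# A minimal usage sketch (illustrative strings, not part of the original module):
#
#     corpus = "the first document\nthe second document about cats"
#     tf = term_frequency("document", "the first document")    # -> 1
#     df, n = document_frequency("document", corpus)           # -> (2, 2)
#     idf = inverse_document_frequency(df, n, smoothing=True)  # -> round(1 + log10(2 / 3), 3)
#     score = tf_idf(tf, idf)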
| 22 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
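# For illustration: a fairseq key such as "transformer.layers.0.linear1.weight"
# (hypothetical example key) is mapped by rename_keys above to
# "model.decoder.layers.0.fc1.weight".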
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 22 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by ..." text for a publication on Google Scholar."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 353
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 22 | 0 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU: identity for positive inputs, alpha * (exp(x) - 1) for negative inputs.
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
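# A quick sanity check (approximate outputs, shown for illustration):
#
#     exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3)
#     # -> array([ 2.3, 0.6, -0.2594..., -0.2933...])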
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 354 |
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 22 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
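# A minimal usage sketch (assumes access to the Hugging Face Hub; the model name
# is taken from the map above):
#
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tokenizer("Hello world")["input_ids"]
#     text = tokenizer.decode(ids)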
| 355 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 22 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 356 |
def find_min(arr):
    """Partition arr into two subsets so the difference of their sums is minimal."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # fixed: inherit from the previous row, not the previous column
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
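# Example: for [1, 6, 11, 5] the total is 23 and the best split is {1, 5, 6} vs {11},
# so find_min([1, 6, 11, 5]) returns 12 - 11 = 1.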
| 22 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        # Checks whether there might be something wrong with given input with regard to the model.
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 357 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
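# A minimal usage sketch (BertConfig is an arbitrary illustrative choice):
#
#     from transformers import BertConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention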
| 358 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (keep the result; the bare call was a no-op)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 22 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = """▁"""
lowerCamelCase__ = {"""vocab_file""": """spiece.model"""}
lowerCamelCase__ = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
lowerCamelCase__ = {
"""google/pegasus-xsum""": 5_12,
}
lowerCamelCase__ = logging.get_logger(__name__)
class __magic_name__ (__A ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , _a , _a="<pad>" , _a="</s>" , _a="<unk>" , _a="<mask_2>" , _a="<mask_1>" , _a=None , _a=103 , _a = None , **_a , ) -> List[Any]:
lowerCAmelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(__lowercase , __lowercase ):
raise TypeError(
f"additional_special_tokens should be of type {type(__lowercase )}, but is"
f" {type(__lowercase )}" )
lowerCAmelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(__lowercase ) , self.offset - 1 )
]
if len(set(__lowercase ) ) != len(__lowercase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
lowerCAmelCase_ = additional_special_tokens_extended
else:
lowerCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowercase , unk_token=__lowercase , mask_token=__lowercase , pad_token=__lowercase , mask_token_sent=__lowercase , offset=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase_ = mask_token_sent
lowerCAmelCase_ = vocab_file
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
# add special tokens to encoder dict
lowerCAmelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase_ = {v: k for k, v in self.encoder.items()}
@property
def __a ( self ) -> Union[str, Any]:
return len(self.sp_model ) + self.offset
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
return state
def __setstate__( self , _a ) -> Tuple:
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self , _a ) -> List[Any]:
return self.sp_model.encode(__lowercase , out_type=__lowercase )
def __a ( self , _a ) -> Tuple:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase_ = self.sp_model.piece_to_id(__lowercase )
return sp_id + self.offset
def __a ( self , _a ) -> List[Any]:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def __a ( self , _a ) -> str:
lowerCAmelCase_ = []
lowerCAmelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowercase ) + token
lowerCAmelCase_ = []
else:
current_sub_tokens.append(__lowercase )
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def __a ( self , _a=False ) -> Optional[Any]:
return 1
def __a ( self , _a ) -> str:
lowerCAmelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __a ( self , _a , _a = None , _a = False ) -> Tuple:
if already_has_special_tokens:
return self._special_token_mask(__lowercase )
elif token_ids_a is None:
return self._special_token_mask(__lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __a ( self , _a , _a=None ) -> Union[str, Any]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __a ( self , _a , _a = None ) -> Any:
if not os.path.isdir(__lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase_ = os.path.join(
__lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , "wb" ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: int ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__lowerCamelCase , __lowerCamelCase )
for pat in pats:
lowerCAmelCase_ = key.replace(__lowerCamelCase , "_".join(pat.split("." ) ) )
return key
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
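# Worked example for the conv branch above (illustrative, not from the original
# module): a PyTorch conv weight with tuple key ("conv", "weight") and OIHW
# shape (8, 3, 3, 3) is renamed to ("conv", "kernel") and transposed to the
# Flax HWIO layout, giving shape (3, 3, 3, 8).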
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
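# Hypothetical usage sketch (the converter name above was reconstructed, and
# `pt_model`/`flax_model` are assumptions, not part of this file):
#   pt_state_dict = pt_model.state_dict()
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)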
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_MAP = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig ):
    model_type = '''bertabs'''
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig ):
    model_type = 'xlm'
    attribute_map = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1E-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ) -> List[Any]:
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Dict:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
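# Illustrative result (an assumption based on the property above): for the
# default task this resolves to
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...]),
# which the ONNX exporter uses to mark batch and sequence as dynamic axes.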
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
        if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.bias" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
        ]
        if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str , output_path: str , ):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted checkpoint.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
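    # Example invocation (illustrative; the script file name is an assumption):
    #   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers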
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders( self ) -> Tuple:
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
def generate_large_matrix() -> list[list[int]]:
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
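# Illustrative trace (not part of the original file): for the row [4, 3, -1, -2]
# the bisection converges on index 2, the first negative entry, so
# find_negative_index returns 2 (two non-negative values precede it).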
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
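# Illustrative sanity check (not part of the original file): the three counting
# strategies above should agree; the grid below is the LeetCode 1351 example,
# which contains 8 negative numbers.
def _example_agreement_check() -> None:
    example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(example )
        == count_negatives_brute_force(example )
        == count_negatives_brute_force_with_break(example )
        == 8
    )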
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)" , setup=setup , number=500 )
        print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> Union[str, Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> List[Any]:
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> Tuple:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 1008 )
    def test_vocab_size( self ) -> Optional[int]:
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
    def test_full_tokenizer( self ) -> List[str]:
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ) -> Any:
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
    def test_picklable_without_disk( self ) -> Any:
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ) -> str:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols( self ) -> List[str]:
        symbols = """Hello World!"""
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ) -> Dict:
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration( self ) -> Dict:
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool ):
    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']
    def setup( self ) -> Optional[int]:
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
    def encode( self , text , labels ) -> int:
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
    def decode( self , outputs ) -> Union[str, Any]:
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
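# Hypothetical usage sketch (the call pattern follows the PipelineTool API; the
# example text and labels are assumptions, not part of the original file):
#   tool = TextClassificationTool()
#   tool.setup()
#   label = tool("This movie was great", labels=["positive", "negative"])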
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def and_gate(input_1: int , input_2: int ) -> int:
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
import math
def perfect_square(num: int ) -> bool:
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search(n: int ) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
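# Illustrative check (not part of the original file): both strategies above
# agree on small inputs; perfect_square relies on math.sqrt being exact for
# perfect squares, which holds for values in double-precision range.
def _example_perfect_square_check() -> None:
    for value, expected in ((0, True), (1, True), (16, True), (14, False)):
        assert perfect_square(value ) is expected
        assert perfect_square_binary_search(value ) is expected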
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ) -> List[Any]:
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ) -> Dict:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> str:
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ) -> Optional[Any]:
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ) -> int:
        self.config_tester.run_common_tests()
    def test_model( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Any:
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="MRA does not output attentions" )
    def test_attention_outputs( self ) -> List[Any]:
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head( self ) -> Optional[int]:
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm( self ) -> Dict:
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self ) -> Optional[Any]:
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
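# Hypothetical usage sketch (illustrative, not part of the original module):
# check that the installed tokenizers package satisfies the pinned range from
# dependency_versions_table.
#   dep_version_check("tokenizers")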
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"{script_name}.py"
    with open(script_path , "w" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
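# Sketch of a test consuming the fixture above (illustrative only; the test
# name and assertion are assumptions, not part of the original conftest):
#   def test_dataset_loading_script_dir(dataset_loading_script_dir):
#       assert dataset_loading_script_dir.endswith("__dummy_dataset1__")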
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location="cpu" )
    hub_interface = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
    hub_interface.model.load_state_dict(sd["model"] )
    return hub_interface
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("pytorch/fairseq" , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace("." , "-" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("mnli" , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , "lm_head" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
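    # Example invocation (illustrative; the output path is an assumption):
    #   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn ./bart-large-cnn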
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ) -> int:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[str]:
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Union[str, Any]:
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> str:
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> List[Any]:
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Tuple:
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> List[str]:
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_graph_mode_with_inputs_embeds( self ) -> Optional[Any]:
        pass
    def test_model_common_attributes( self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> List[str]:
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> List[Any]:
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> Optional[Any]:
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp( self ) -> Optional[Any]:
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts( self , tokenizer ) -> Any:
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ) -> Union[str, Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ) -> Tuple:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = '''▁'''
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
lowerCamelCase__ = {
'''google/pegasus-xsum''': 5_12,
}
lowerCamelCase__ = logging.get_logger(__name__)
class __magic_name__ (a__ ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , _a , _a="<pad>" , _a="</s>" , _a="<unk>" , _a="<mask_2>" , _a="<mask_1>" , _a=None , _a=103 , _a = None , **_a , ) -> None:
lowerCAmelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError(
f"additional_special_tokens should be of type {type(_lowerCamelCase )}, but is"
f" {type(_lowerCamelCase )}" )
lowerCAmelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(_lowerCamelCase ) , self.offset - 1 )
]
if len(set(_lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
lowerCAmelCase_ = additional_special_tokens_extended
else:
lowerCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , mask_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token_sent=_lowerCamelCase , offset=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
lowerCAmelCase_ = mask_token_sent
lowerCAmelCase_ = vocab_file
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
# add special tokens to encoder dict
lowerCAmelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase_ = {v: k for k, v in self.encoder.items()}
@property
def __a ( self ) -> int:
return len(self.sp_model ) + self.offset
def __a ( self ) -> Dict[str, int]:
        lowerCAmelCase_ = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
return state
def __setstate__( self , _a ) -> Union[str, Any]:
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self , _a ) -> List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def __a ( self , _a ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase_ = self.sp_model.piece_to_id(_lowerCamelCase )
return sp_id + self.offset
def __a ( self , _a ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def __a ( self , _a ) -> Union[str, Any]:
lowerCAmelCase_ = []
lowerCAmelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
lowerCAmelCase_ = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __a ( self , _a=False ) -> str:
return 1
def __a ( self , _a ) -> int:
lowerCAmelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __a ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(_lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(_lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __a ( self , _a , _a=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __a ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase_ = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , "wb" ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
| 369
|
import math
from collections.abc import Iterator
from itertools import takewhile
def A(__a: int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A():
lowerCAmelCase_ = 2
while True:
if is_prime(__a ):
yield num
num += 1
def A(__a: int = 200_0000 ):
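    # Project Euler problem 10: sum all primes strictly below the given limit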
    return sum(takewhile(lambda x : x < __a , prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCAmelCase (unittest.TestCase ):
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase_ = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCAmelCase_ = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self , **_a ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , **_a ) -> Tuple:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def __a ( self ) -> Dict:
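        # build one random channels-first RGB array and convert it to a PIL image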
        lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        lowerCAmelCase_ = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def __a ( self ) -> Dict:
lowerCAmelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase_ = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
lowerCAmelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(_lowerCAmelCase , return_tensors="np" )
lowerCAmelCase_ = processor(images=_lowerCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCAmelCase_ = """lower newer"""
lowerCAmelCase_ = processor(text=_lowerCAmelCase )
lowerCAmelCase_ = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCAmelCase_ = """lower newer"""
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(_lowerCAmelCase ):
processor()
def __a ( self ) -> str:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ = processor.batch_decode(_lowerCAmelCase )
lowerCAmelCase_ = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowerCAmelCase_ = """lower newer"""
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 370
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''mobilenet_v2'''
def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = depth_multiplier
lowerCAmelCase_ = depth_divisible_by
lowerCAmelCase_ = min_depth
lowerCAmelCase_ = expand_ratio
lowerCAmelCase_ = output_stride
lowerCAmelCase_ = first_layer_is_expansion
lowerCAmelCase_ = finegrained_output
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = tf_padding
lowerCAmelCase_ = classifier_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = semantic_loss_ignore_index
class __magic_name__ (__lowercase ):
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __a ( self ) -> float:
return 1E-4
| 22
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
lowerCamelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
lowerCamelCase__ = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
lowerCamelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __magic_name__ (_UpperCAmelCase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __magic_name__ (_UpperCAmelCase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase__ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class __magic_name__ :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> int:
if titles is None and texts is None:
return super().__call__(
lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
elif titles is None or texts is None:
lowerCAmelCase_ = titles if texts is None else texts
return super().__call__(
lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = titles if not isinstance(lowercase_ , lowercase_ ) else [titles]
lowerCAmelCase_ = texts if not isinstance(lowercase_ , lowercase_ ) else [texts]
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
f"There should be as many titles than texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts." )
lowerCAmelCase_ = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""]
lowerCAmelCase_ = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""]
lowerCAmelCase_ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ )
]
}
if return_attention_mask is not False:
lowerCAmelCase_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase_ = attention_mask
return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ )
def __a ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> str:
lowerCAmelCase_ = reader_input["""input_ids"""]
lowerCAmelCase_ = reader_output[:3]
lowerCAmelCase_ = len(lowercase_ )
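        # rank the passages from most to least relevant according to the relevance logits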
lowerCAmelCase_ = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
lowerCAmelCase_ = []
for doc_id in sorted_docs:
lowerCAmelCase_ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase_ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase_ = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __a ( self , _a , _a , _a , _a , ) -> str:
lowerCAmelCase_ = []
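        # score every candidate (start, end) span as start_logit + end_logit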
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowerCAmelCase_ = sorted(lowercase_ , key=lambda x : x[1] , reverse=lowercase_ )
lowerCAmelCase_ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
lowerCAmelCase_ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class __magic_name__ (_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
| 371
|
from __future__ import annotations
def A(__a: dict , __a: str ):
lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start]
while stack:
lowerCAmelCase_ = stack.pop()
explored.add(__a )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__a )
return explored
lowerCamelCase__ = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 22
| 0
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> str:
lowerCAmelCase_ = torch.nn.Linear(10 , 10 )
lowerCAmelCase_ = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(f"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 350
|
def A(__a: Tuple ):
lowerCAmelCase_ = len(__a )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase_ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )]
# Reverse whole list
lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 22
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.txt"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
lowerCamelCase__ = {
"""openbmb/cpm-ant-10b""": 10_24,
}
def A(__a: str ):
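    # read a plain-text vocab file into an OrderedDict mapping token -> index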
lowerCAmelCase_ = collections.OrderedDict()
with open(_snake_case , "r" , encoding="utf-8" ) as reader:
lowerCAmelCase_ = reader.readlines()
for index, token in enumerate(_snake_case ):
lowerCAmelCase_ = token.rstrip("\n" )
lowerCAmelCase_ = index
return vocab
class __magic_name__ (__snake_case ):
def __init__( self , _a , _a="<unk>" , _a=200 ) -> Dict:
lowerCAmelCase_ = vocab
lowerCAmelCase_ = unk_token
lowerCAmelCase_ = max_input_chars_per_word
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = list(a_ )
if len(a_ ) > self.max_input_chars_per_word:
return [self.unk_token]
lowerCAmelCase_ = 0
lowerCAmelCase_ = []
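        # greedy longest-match-first segmentation against the vocabulary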
while start < len(a_ ):
lowerCAmelCase_ = len(a_ )
lowerCAmelCase_ = None
while start < end:
lowerCAmelCase_ = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowerCAmelCase_ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(a_ )
lowerCAmelCase_ = end
return sub_tokens
class __magic_name__ (__snake_case ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = False
def __init__( self , _a , _a="<d>" , _a="</d>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="<unk>" , _a="</n>" , _a="</_>" , _a="left" , **_a , ) -> List[Any]:
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=a_ , eod_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , unk_token=a_ , line_token=a_ , space_token=a_ , padding_side=a_ , **a_ , )
lowerCAmelCase_ = bod_token
lowerCAmelCase_ = eod_token
lowerCAmelCase_ = load_vocab(a_ )
lowerCAmelCase_ = self.encoder[space_token]
lowerCAmelCase_ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        lowerCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
lowerCAmelCase_ = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self ) -> Union[str, Any]:
return self.encoder[self.bod_token]
@property
def __a ( self ) -> int:
return self.encoder[self.eod_token]
@property
def __a ( self ) -> str:
return self.encoder["\n"]
@property
def __a ( self ) -> Dict:
return len(self.encoder )
def __a ( self ) -> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = []
for x in jieba.cut(a_ , cut_all=a_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a_ ) )
return output_tokens
def __a ( self , _a , **_a ) -> List[str]:
lowerCAmelCase_ = [i for i in token_ids if i >= 0]
lowerCAmelCase_ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a_ , **a_ )
def __a ( self , _a ) -> Union[str, Any]:
return token in self.encoder
def __a ( self , _a ) -> Dict:
return "".join(a_ )
def __a ( self , _a ) -> Dict:
return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
def __a ( self , _a ) -> Union[str, Any]:
return self.decoder.get(a_ , self.unk_token )
def __a ( self , _a , _a = None ) -> int:
if os.path.isdir(a_ ):
lowerCAmelCase_ = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowerCAmelCase_ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowerCAmelCase_ = 0
if " " in self.encoder:
lowerCAmelCase_ = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowerCAmelCase_ = self.encoder['''\n''']
del self.encoder["\n"]
        lowerCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(a_ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
lowerCAmelCase_ = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def __a ( self , _a , _a = None ) -> Dict:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self , _a , _a = None , _a = False ) -> Any:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ ))
return [1] + ([0] * len(a_ ))
| 351
|
import string
from math import logaa
def A(__a: str , __a: str ):
lowerCAmelCase_ = document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def A(__a: str , __a: str ):
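    # document frequency: return (number of documents containing the term, total number of documents)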
lowerCAmelCase_ = corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCAmelCase_ = corpus_without_punctuation.split("\n" )
lowerCAmelCase_ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__a ))
def A(__a: int , __a: int , __a: List[Any]=False ):
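    # inverse document frequency; with smoothing this is 1 + log10(n / (1 + df))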
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def A(__a: int , __a: int ):
return round(tf * idf , 3 )
| 22
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 352
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A(__a: str , __a: List[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
return (preds == labels).mean()
def A(__a: Any , __a: Any ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = simple_accuracy(__a , __a )
lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A(__a: List[str] , __a: Optional[int] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = pearsonr(__a , __a )[0]
lowerCAmelCase_ = spearmanr(__a , __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A(__a: Union[str, Any] , __a: Any , __a: str ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "mrpc":
return acc_and_fa(__a , __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a )
elif task_name == "qqp":
return acc_and_fa(__a , __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
if len(__a ) != len(__a ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
| 22
| 0
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def A(__a: Dict ):
    return int(__a / 2**20 )
class __magic_name__ :
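    # context manager that reports CUDA memory allocated and peaked between __enter__ and __exit__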
def __enter__( self ) -> Optional[int]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase_ = torch.cuda.memory_allocated()
return self
def __exit__( self , *_a ) -> int:
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase_ = torch.cuda.memory_allocated()
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
lowerCAmelCase_ = bamb(self.end - self.begin )
lowerCAmelCase_ = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def A(__a: Accelerator , __a: int = 16 , __a: str = "bert-base-cased" , __a: int = 320 , __a: int = 160 , ):
lowerCAmelCase_ = AutoTokenizer.from_pretrained(__a )
lowerCAmelCase_ = load_dataset(
"glue" , "mrpc" , split={"train": F"train[:{n_train}]", "validation": F"validation[:{n_val}]"} )
def tokenize_function(__a: int ):
# max_length=None => use the model max length (it's actually the default)
        lowerCAmelCase_ = tokenizer(__a["sentence1"] , __a["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ = datasets.map(
__a , batched=__a , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__a: str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__a , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=__a , collate_fn=__a , batch_size=__a )
lowerCAmelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
def A(__a: Any , __a: str ):
# Initialize accelerator
lowerCAmelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config["lr"]
lowerCAmelCase_ = int(config["num_epochs"] )
lowerCAmelCase_ = int(config["seed"] )
lowerCAmelCase_ = int(config["batch_size"] )
lowerCAmelCase_ = args.model_name_or_path
set_seed(__a )
lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(__a , __a , __a , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
lowerCAmelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowerCAmelCase_ = 1
lowerCAmelCase_ = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
lowerCAmelCase_ = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ = 0
# Now we train the model
lowerCAmelCase_ = {}
for epoch in range(__a , __a ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__a ):
lowerCAmelCase_ = model(**__a )
lowerCAmelCase_ = outputs.loss
lowerCAmelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase_ = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__a , __a )
def A():
lowerCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__a , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__a , )
parser.add_argument(
"--output_dir" , type=__a , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__a , default=__a , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__a , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__a , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__a , default=1 , help="Number of train epochs." , )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__a , __a )
if __name__ == "__main__":
main()
| 353
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , _a=None , _a=None , **_a ) -> Tuple:
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _a , )
lowerCAmelCase_ = kwargs.pop("feature_extractor" )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_a , _a )
def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a )
if visual_prompt is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if images is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if visual_prompt is not None and images is not None:
lowerCAmelCase_ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase_ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def __a ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.batch_decode(*_a , **_a )
def __a ( self , *_a , **_a ) -> Optional[int]:
return self.tokenizer.decode(*_a , **_a )
@property
def __a ( self ) -> List[str]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , )
return self.image_processor_class
@property
def __a ( self ) -> Optional[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , )
return self.image_processor
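# Hedged usage sketch (added): a typical round trip through a processor like
# the one above. The checkpoint id and variable names are assumptions, not
# taken from this file:
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   enc = processor(text=["a cat"], images=image, return_tensors="pt")
#   # enc then carries input_ids, attention_mask and pixel_values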
| 22
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def A(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
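# Added illustration (not part of the original script): the pagination pattern
# both fetchers above rely on. GitHub caps `per_page` at 100, so the reported
# `total_count` decides how many extra pages (starting at page 2) to request.
def _paginated_get_sketch(url, total_count, headers=None):
    import math

    import requests

    pages = []
    for i in range(math.ceil(max(total_count - 100, 0) / 100)):
        pages.append(requests.get(url + F"&page={i + 2}", headers=headers).json())
    return pages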
def A(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, F"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def A(__a: Optional[Any] , __a: List[str]=None ):
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
lowerCAmelCase_ = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase_ = line[: line.index(": " )]
lowerCAmelCase_ = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
lowerCAmelCase_ = line[len("FAILED " ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
lowerCAmelCase_ = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` "
F"and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
lowerCAmelCase_ = None
if job_name and job_links:
lowerCAmelCase_ = job_links.get(snake_case_ , snake_case_ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase_ = [x + [y] + [job_link] for x, y in zip(snake_case_ , snake_case_ )]
return result
def A(__a: List[str] , __a: Tuple=None ):
lowerCAmelCase_ = []
lowerCAmelCase_ = [os.path.join(snake_case_ , snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_ , job_links=snake_case_ ) )
return errors
def A(__a: Optional[Any] , __a: Optional[Any]=None ):
lowerCAmelCase_ = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase_ = counter.most_common()
lowerCAmelCase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase_ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
return r
def A(__a: Dict ):
lowerCAmelCase_ = test.split("::" )[0]
if test.startswith("tests/models/" ):
lowerCAmelCase_ = test.split("/" )[2]
else:
lowerCAmelCase_ = None
return test
def A(__a: Tuple , __a: List[str]=None ):
lowerCAmelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase_ = [x for x in logs if x[2] is not None]
lowerCAmelCase_ = {x[2] for x in logs}
lowerCAmelCase_ = {}
for test in tests:
lowerCAmelCase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase_ = counter.most_common()
lowerCAmelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase_ = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase_ = {"""count""": n_errors, """errors""": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
return r
def A(__a: Union[str, Any] ):
lowerCAmelCase_ = """| no. | error | status |"""
lowerCAmelCase_ = """|-:|:-|:-|"""
lowerCAmelCase_ = [header, sep]
for error in reduced_by_error:
lowerCAmelCase_ = reduced_by_error[error]["""count"""]
lowerCAmelCase_ = F"| {count} | {error[:100]} | |"
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def A(__a: Dict ):
lowerCAmelCase_ = """| model | no. of errors | major error | count |"""
lowerCAmelCase_ = """|-:|-:|-:|-:|"""
lowerCAmelCase_ = [header, sep]
for model in reduced_by_model:
lowerCAmelCase_ = reduced_by_model[model]["""count"""]
lowerCAmelCase_ = list(reduced_by_model[model]["errors"].items() )[0]
lowerCAmelCase_ = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
lowerCamelCase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCamelCase__ = get_job_links(args.workflow_run_id, token=args.token)
lowerCamelCase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase__ = k.find(''' / ''')
lowerCamelCase__ = k[index + len(''' / ''') :]
lowerCamelCase__ = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCamelCase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCamelCase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCamelCase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCamelCase__ = reduce_by_error(errors)
lowerCamelCase__ = reduce_by_model(errors)
lowerCamelCase__ = make_github_table(reduced_by_error)
lowerCamelCase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 354
|
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowerCamelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
def __a ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def __a ( self , _a , _a ) -> List[str]:
return {"accuracy": simple_accuracy(_a , _a )}
| 22
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __magic_name__ (__UpperCamelCase ):
lowerCamelCase__ = """owlvit_text_model"""
def __init__( self , _a=49408 , _a=512 , _a=2048 , _a=12 , _a=8 , _a=16 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , _a=0 , _a=49406 , _a=49407 , **_a , ) -> Any:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = initializer_factor
@classmethod
def __a ( cls , _a , **_a ) -> int:
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
lowerCAmelCase_ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
class __magic_name__ (__UpperCamelCase ):
lowerCamelCase__ = """owlvit_vision_model"""
def __init__( self , _a=768 , _a=3072 , _a=12 , _a=12 , _a=3 , _a=768 , _a=32 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , **_a , ) -> List[str]:
super().__init__(**_a )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = initializer_factor
@classmethod
def __a ( cls , _a , **_a ) -> str:
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
lowerCAmelCase_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
class __magic_name__ (__UpperCamelCase ):
lowerCamelCase__ = """owlvit"""
lowerCamelCase__ = True
def __init__( self , _a=None , _a=None , _a=512 , _a=2.6_5_9_2 , _a=True , **_a , ) -> List[str]:
super().__init__(**_a )
if text_config is None:
lowerCAmelCase_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
lowerCAmelCase_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
lowerCAmelCase_ = OwlViTTextConfig(**_a )
lowerCAmelCase_ = OwlViTVisionConfig(**_a )
lowerCAmelCase_ = projection_dim
lowerCAmelCase_ = logit_scale_init_value
lowerCAmelCase_ = return_dict
lowerCAmelCase_ = 1.0
@classmethod
def __a ( cls , _a , **_a ) -> Any:
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
@classmethod
def __a ( cls , _a , _a , **_a ) -> int:
lowerCAmelCase_ = {}
lowerCAmelCase_ = text_config
lowerCAmelCase_ = vision_config
return cls.from_dict(_a , **_a )
def __a ( self ) -> Dict:
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ = self.text_config.to_dict()
lowerCAmelCase_ = self.vision_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
class __magic_name__ (__UpperCamelCase ):
@property
def __a ( self ) -> List[str]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def __a ( self ) -> int:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def __a ( self ) -> Dict:
return 1E-4
def __a ( self , _a , _a = -1 , _a = -1 , _a = None , ) -> Dict:
lowerCAmelCase_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a )
lowerCAmelCase_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_a , framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def __a ( self ) -> Dict:
return 14
| 355
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, F"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, F"{split}.target"), SUMMARIES)
    return tmp_dir
class __magic_name__ (__lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def __a ( self , tok_name ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __a ( self , tok_name ) -> str:
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(F"too many tokens in {len(failures)} batches")
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 22
| 0
|
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 356
|
def A(arr: list) -> int:
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = 0
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
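# Added usage sketch: for [1, 6, 11, 5] the closest split is {1, 5, 6} vs {11},
# so the minimum subset-sum difference returned above is 1.
def _min_subset_diff_demo() -> int:
    return A([1, 6, 11, 5])  # -> 1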
| 22
| 0
|
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
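# Added variant (not in the original): list.pop(0) in `_merge` is O(n); this
# deque sketch keeps the same two-pointer logic while making each pop O(1).
from collections import deque


def merge_sorted_deques(left: deque, right: deque) -> list:
    out = []
    while left and right:
        out.append(left.popleft() if left[0] <= right[0] else right.popleft())
    out.extend(left)
    out.extend(right)
    return out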
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 357
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A(__a: Any , __a: Union[str, Any] , __a: List[str] ):
lowerCAmelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''')
lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22
| 0
|
from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree():
    dfs(1)
if __name__ == "__main__":
lowerCamelCase__ = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
lowerCamelCase__ = 0
lowerCamelCase__ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
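# Added note: dfs() records every node whose subtree has an even number of
# vertices; each such node marks a removable edge to its parent. The root has
# no parent edge but is always recorded, hence the trailing `- 1`.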
| 358
|
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 22
| 0
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self, data) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19,
]
# Initialize round constants
        self.round_constants = [
0x428a2f98,
0x71374491,
0xb5c0fbcf,
0xe9b5dba5,
0x3956c25b,
0x59f111f1,
0x923f82a4,
0xab1c5ed5,
0xd807aa98,
0x12835b01,
0x243185be,
0x550c7dc3,
0x72be5d74,
0x80deb1fe,
0x9bdc06a7,
0xc19bf174,
0xe49b69c1,
0xefbe4786,
0x0fc19dc6,
0x240ca1cc,
0x2de92c6f,
0x4a7484aa,
0x5cb0a9dc,
0x76f988da,
0x983e5152,
0xa831c66d,
0xb00327c8,
0xbf597fc7,
0xc6e00bf3,
0xd5a79147,
0x06ca6351,
0x14292967,
0x27b70a85,
0x2e1b2138,
0x4d2c6dfc,
0x53380d13,
0x650a7354,
0x766a0abb,
0x81c2c92e,
0x92722c85,
0xa2bfe8a1,
0xa81a664b,
0xc24b8b70,
0xc76c51a3,
0xd192e819,
0xd6990624,
0xf40e3585,
0x106aa070,
0x19a4c116,
0x1e376c08,
0x2748774c,
0x34b0bcb5,
0x391c0cb3,
0x4ed8aa4a,
0x5b9cca4f,
0x682e6ff3,
0x748f82ee,
0x78a5636f,
0x84c87814,
0x8cc70208,
0x90befffa,
0xa4506ceb,
0xbef9a3f7,
0xc67178f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xffffffff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xffffffff & (value << (32 - rotations)) | (value >> rotations)
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> Optional[int]:
import hashlib
        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
| 359
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
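# Added note: `_LazyModule` defers the imports declared in `_import_structure`
# until an attribute such as `EncodecModel` is first accessed, which keeps the
# top-level package import cheap when torch-backed classes are never used.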
| 22
| 0
|
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
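# Added note: N is a 1000-digit string, each window n[i : i + 13] is 13 digits,
# reduce() multiplies the digits of one window, and max() scans all
# len(n) - 12 = 988 windows.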
if __name__ == "__main__":
print(F'''{solution() = }''')
| 360
|
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''bertabs'''
def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]:
super().__init__(**_a )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_pos
lowerCAmelCase_ = enc_layers
lowerCAmelCase_ = enc_hidden_size
lowerCAmelCase_ = enc_heads
lowerCAmelCase_ = enc_ff_size
lowerCAmelCase_ = enc_dropout
lowerCAmelCase_ = dec_layers
lowerCAmelCase_ = dec_hidden_size
lowerCAmelCase_ = dec_heads
lowerCAmelCase_ = dec_ff_size
lowerCAmelCase_ = dec_dropout
| 22
| 0
|
from numpy import exp, pi, sqrt
def A(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
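# Added check: with the defaults mu=0 and sigma=1 this is the standard normal
# density, which peaks at x = 0 with value 1 / sqrt(2 * pi) ~= 0.39894.
def _std_normal_peak() -> float:
    return A(0.0)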
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A(__a: Tuple , __a: Union[str, Any] ):
lowerCAmelCase_ = checkpoint
lowerCAmelCase_ = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def A(__a: str , __a: str , ):
# Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowerCAmelCase_ = io.BytesIO(r.content )
lowerCAmelCase_ = OmegaConf.load(__a )
lowerCAmelCase_ = 512
lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowerCAmelCase_ = {}
with safe_open(__a , framework="pt" , device="cpu" ) as f:
for key in f.keys():
lowerCAmelCase_ = f.get_tensor(__a )
else:
lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"]
# Convert the VAE model.
lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a )
lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a )
lowerCAmelCase_ = AutoencoderKL(**__a )
vae.load_state_dict(__a )
vae.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
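# Example invocation (added; script and file names are hypothetical):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers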
| 22
| 0
|
from ..utils import DummyObject, requires_backends
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''keras_nlp''']
def __init__( self , *_a , **_a ) -> Tuple:
requires_backends(self , ["keras_nlp"] )
| 362
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)", setup=setup, number=500)
        print(F"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
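# Added note: thanks to the shrinking `bound`, the binary-search version does
# at most O(n log m) work on an n x m grid; the break variant is O(n * m) in
# the worst case and the flat brute force always touches all n * m cells --
# which matches the benchmark times recorded in the comments above.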
| 22
| 0
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 363
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
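# Added illustration: rename_key() rewrites indexed PyTorch module names into
# Flax-style underscore names (example input/output chosen for clarity):
def _rename_key_demo() -> str:
    return rename_key("down_blocks.0.resnets.1.conv1.weight")
    # -> "down_blocks_0.resnets_1.conv1.weight"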
| 22
| 0
|
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowerCamelCase__ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowerCamelCase__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
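# Instantiation sketch (added; the values below are the defaults shown above):
#   config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
#   assert config.hidden_size == config.n_embd  # resolved via attribute_map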
| 365
|
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
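# Note (added): the float-based check above can misreport very large inputs
# because math.sqrt rounds to 53-bit precision. Python 3.8+ offers an exact
# integer alternative:
def perfect_square_isqrt(n: int) -> bool:
    return n >= 0 and math.isqrt(n) ** 2 == n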
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
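# Usage sketch (added; `spark` is a hypothetical live SparkSession, not part
# of this module):
#   df = spark.createDataFrame([("hello",)], ["text"])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()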
| 366
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 22
| 0
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
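# The update above is the standard LoRA merge: each targeted weight W0 is
# replaced by W = W0 + alpha * (up @ down), folding the low-rank pair into the
# dense matrix. Toy illustration (own example, not part of the script):
#   up, down = torch.randn(320, 4), torch.randn(4, 320)
#   delta = 0.75 * torch.mm(up, down)  # rank-4 update, same shape as W0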
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
lowerCamelCase__ = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 367
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
    raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
    ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
    ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
    ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
    ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
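# Example (added): bubble_sort([0, 5, 2, 3, 2]) returns [0, 2, 2, 3, 5]; the
# recursion stops as soon as a full pass makes no swap, so an already sorted
# list costs a single O(n) pass.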
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
def __a ( self ) -> Optional[Any]:
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22
| 0
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
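# Note (added): the hidden size of this model is 256, so each in_proj_weight
# has shape (3 * 256, 256); the [:256], [256:512] and [-256:] slices above are
# the query, key and value projections, in that order.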
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 369
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'''{solution() = }''')
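# Added note: this is Project Euler problem 10; solution(10) == 17
# (2 + 3 + 5 + 7), and the default solution() returns 142913828922.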
| 22
| 0
|
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('''doctest''').testmod()
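# Examples (added), given the helpers above:
#   to_pascal_case("some random string")        -> "SomeRandomString"
#   to_camel_case("some random string")         -> "someRandomString"
#   to_snake_case("some random string", False)  -> "some_random_string"
#   to_kebab_case("some random string", True)   -> "SOME-RANDOM-STRING"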
| 370
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = '''mobilenet_v2'''

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 22
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
def __a ( self ) -> Dict:
super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(" " )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
lowerCAmelCase_ = tokenizer("m xxx ɪ" , do_phonemize=__lowercase ).input_ids
self.assertEqual(__lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
lowerCAmelCase_ = tokenizer("m aaa ɪ ccc" , do_phonemize=__lowercase ).input_ids
self.assertEqual(__lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase_ = tokenizer("maɪ c" , do_phonemize=__lowercase ).input_ids
self.assertEqual(__lowercase , [3, 200] ) # mai should be <unk> (=3)
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
self.assertEqual(__lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__lowercase ).input_ids , tokenizer(__lowercase , do_phonemize=__lowercase ).input_ids )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
lowerCAmelCase_ = tokenizer.decode(tokenizer(__lowercase ).input_ids )
self.assertEqual(__lowercase , __lowercase )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase_ = tokenizer.decode(sample_ids[0] )
lowerCAmelCase_ = tokenizer.batch_decode(__lowercase )
self.assertEqual(__lowercase , batch_tokens[0] )
self.assertEqual(__lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
self.assertEqual(__lowercase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__lowercase ).input_ids , tokenizer(__lowercase , do_phonemize=__lowercase ).input_ids )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
lowerCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase_ = tokenizer.decode(sample_ids[0] )
lowerCAmelCase_ = tokenizer.batch_decode(__lowercase )
self.assertEqual(__lowercase , batch_tokens[0] )
self.assertEqual(__lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
lowerCAmelCase_ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowercase )
lowerCAmelCase_ = tokenizer.batch_decode(__lowercase , filter_word_delimiter_token=__lowercase )
self.assertEqual(__lowercase , batch_tokens[0] )
self.assertEqual(__lowercase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
lowerCAmelCase_ = tokenizer.decode(tokenizer(__lowercase ).input_ids , filter_word_delimiter_token=__lowercase )
self.assertEqual(__lowercase , __lowercase )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang="en-us" )
lowerCAmelCase_ = tokenizer.decode(tokenizer(__lowercase ).input_ids , filter_word_delimiter_token=__lowercase )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __lowercase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__lowercase )
lowerCAmelCase_ = '''Hello how are you'''
lowerCAmelCase_ = tokenizer(__lowercase , phonemizer_lang="en-us" ).input_ids
lowerCAmelCase_ = tokenizer(__lowercase , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(__lowercase , __lowercase )
lowerCAmelCase_ = tokenizer.decode(__lowercase )
lowerCAmelCase_ = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(__lowercase , "ɛ l o h aʊ a ʁ j u" )
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase_ = '''Hello how Are you'''
lowerCAmelCase_ = '''hello how are you'''
lowerCAmelCase_ = tokenizer(__lowercase ).input_ids
lowerCAmelCase_ = tokenizer(__lowercase ).input_ids
self.assertEqual(__lowercase , __lowercase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
lowerCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCAmelCase_ = tokenizer.batch_decode(__lowercase )
self.assertEqual(__lowercase , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def __a ( _a , _a ) -> Optional[int]:
lowerCAmelCase_ = [d[key] for d in offsets]
return retrieved_list
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
lowerCAmelCase_ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCAmelCase_ = tokenizer.decode(__lowercase , output_char_offsets=__lowercase , filter_word_delimiter_token=__lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(__lowercase , __lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __a ( self ) -> str:
lowerCAmelCase_ = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(_a , _a ):
self.assertTrue(isinstance(__lowercase , __lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , __lowercase ) )
# transform list to ModelOutput
lowerCAmelCase_ = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(_a , _a ):
if isinstance(__lowercase , __lowercase ):
[recursive_check(__lowercase , __lowercase ) for la, la in zip(__lowercase , __lowercase )]
self.assertEqual(__lowercase , __lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
lowerCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCAmelCase_ = tokenizer.batch_decode(__lowercase , output_char_offsets=__lowercase )
lowerCAmelCase_ = [tokenizer.decode(__lowercase , output_char_offsets=__lowercase ) for ids in sample_ids]
check_list_tuples_equal(__lowercase , __lowercase )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def __a ( self ) -> str:
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def __a ( self ) -> str:
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def __a ( self ) -> Dict:
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def __a ( self ) -> Union[str, Any]:
pass
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase_ = tokenizer.vocab_size
lowerCAmelCase_ = len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase_ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowerCAmelCase_ = tokenizer.add_tokens(__lowercase )
lowerCAmelCase_ = tokenizer.vocab_size
lowerCAmelCase_ = len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
lowerCAmelCase_ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase_ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowerCAmelCase_ = tokenizer.add_special_tokens(__lowercase )
lowerCAmelCase_ = tokenizer.vocab_size
lowerCAmelCase_ = len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
lowerCAmelCase_ = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def __a ( self ) -> List[str]:
pass
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def __a ( self ) -> Optional[int]:
pass
def __a ( self ) -> Union[str, Any]:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
lowerCAmelCase_ = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase_ = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowerCAmelCase_ = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(output["text"] , __lowercase )
| 371
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from start."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
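# Expected behaviour (sketch): every vertex of G is reachable from "A", so the
# call above prints a set containing all seven labels, e.g.
# {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (set display order may vary).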
| 22
| 0
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Account for the Earth's flattening by converting to reduced latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
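# Sanity check (illustrative figures, not from this file): San Francisco
# (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) should come
# out at roughly 254 km:
# >>> round(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521) / 1000)
# 254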
| 350
|
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi: flip the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix: flip the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
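# Trace (sketch): pancake_sort([3, 1, 2]) flips the prefix up to the max (a
# no-op here), flips the whole list to [2, 1, 3], then sorts the remaining
# prefix the same way, returning [1, 2, 3].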
| 22
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    """tensor(bool)""": np.bool_,
    """tensor(int8)""": np.int8,
    """tensor(uint8)""": np.uint8,
    """tensor(int16)""": np.int16,
    """tensor(uint16)""": np.uint16,
    """tensor(int32)""": np.int32,
    """tensor(uint32)""": np.uint32,
    """tensor(int64)""": np.int64,
    """tensor(uint64)""": np.uint64,
    """tensor(float16)""": np.float16,
    """tensor(float)""": np.float32,
    """tensor(double)""": np.float64,
}
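# Intended use (sketch): an ONNX type string reported by the session, e.g.
# sess.get_inputs()[0].type == "tensor(float)", maps to the NumPy dtype that
# inputs should be cast to before binding:
# ORT_TO_NP_TYPE["tensor(float)"]  # -> np.float32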
class OnnxRuntimeModel:
    def __init__( self , model=None , **kwargs ):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir" , None )
        self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path , provider=None , sess_options=None ):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory , file_name = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs , ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
    @classmethod
    def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs , ):
        revision = None
        if len(str(model_id ).split("@" ) ) == 2:
            model_id , revision = model_id.split("@" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
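# Minimal usage sketch (model id and input name are assumptions):
#   model = OnnxRuntimeModel.from_pretrained("some/onnx-model" , provider="CPUExecutionProvider" )
#   outputs = model(input_ids=np.ones((1, 8) , dtype=np.int64 ) )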
| 351
|
import string
from math import log10
def term_frequency(term: str , document: str ) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str , corpus: str ):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df: int , n: int , smoothing=False ):
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf(tf: int , idf: int ) -> float:
    return round(tf * idf , 3 )
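# Worked example (illustrative): in a 10-document corpus where a term appears
# in 2 documents, inverse_document_frequency(2, 10) = round(log10(5), 3) = 0.699;
# with tf = 3 occurrences in one document, tf_idf(3, 0.699) returns 2.097.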
| 22
| 0
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model ):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
        model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key(name ):
    if "encoder.model" in name:
        name = name.replace("encoder.model" , "encoder" )
    if "decoder.model" in name:
        name = name.replace("decoder.model" , "decoder" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if name.startswith("encoder" ):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj" , "attention.output.dense" )
        if "attn" in name and "mask" not in name:
            name = name.replace("attn" , "attention.self" )
        if "norm1" in name:
            name = name.replace("norm1" , "layernorm_before" )
        if "norm2" in name:
            name = name.replace("norm2" , "layernorm_after" )
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # split the fused qkv projection into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    # load original model
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config , decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents" )
    image = dataset["test"][0]["image"].convert("RGB" )
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowerCAmelCase_ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
lowerCAmelCase_ = "When is the coffee break?"
lowerCAmelCase_ = task_prompt.replace("{user_input}" , lowerCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowerCAmelCase_ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowerCAmelCase_ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowerCAmelCase_ = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowerCAmelCase_ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowerCAmelCase_ = "hello world"
else:
raise ValueError("Model name not supported" )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors="pt" )[
        "input_ids"
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings , _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and processor to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
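# Typical invocation (illustrative — the script filename is an assumption):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa --push_to_hub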
| 352
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_f1(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , "sklearn" )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), f"Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics(task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
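# Worked example (illustrative): with preds = np.array([1, 0, 1]) and
# labels = np.array([1, 1, 1]), simple_accuracy returns 2/3 and f1_score
# returns 0.8 (precision 1.0, recall 2/3), so acc_and_f1 reports
# {"acc": 0.666..., "f1": 0.8, "acc_and_f1": 0.733...}.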
| 22
| 0
|
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline ):
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 353
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , visual_prompt=None , return_tensors=None , **kwargs ):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
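# Dispatch summary (sketch, grounded in __call__ above): text+images returns
# input_ids plus pixel_values; visual_prompt+images returns pixel_values plus
# conditional_pixel_values; a single modality returns just its own features;
# text together with visual_prompt is rejected, since the model conditions on
# exactly one kind of prompt.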
| 22
| 0
|
from __future__ import annotations
def carrier_concentration(electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
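# Mass-action sanity check (illustrative): with hole_conc=1600 and
# intrinsic_conc=200, carrier_concentration(0, 1600, 200) returns
# ("electron_conc", 25.0), since 200**2 / 1600 == 25.0.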
| 354
|
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self , predictions , references ):
        return {"accuracy": simple_accuracy(predictions , references )}
| 22
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __magic_name__ (PretrainedConfig ):
    model_type = '''deformable_detr'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
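# Minimal usage sketch (illustrative): configs of this kind are built from
# defaults plus a few overrides, e.g.
#   config = __magic_name__(num_queries=100 , two_stage=True , with_box_refine=True )
# two_stage=True without with_box_refine=True raises the ValueError above.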
| 355
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path , articles: list ):
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir(tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f"{split}.source" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f"{split}.target" ) , SUMMARIES )
    return tmp_dir
class __magic_name__ (__lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def __a ( self , tok_name ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang , tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path="train" , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(batch , dict )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __a ( self , tok_name ) -> str:
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path="train" , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
    def __a ( self ) -> Union[str, Any]:
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        orig_examples = tmp_dir.joinpath("train.source" ).open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(tokenizer , tmp_dir , 128 , save_dir )
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source" ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples ) < len(orig_examples )
        assert len(packed_examples ) == 1
        assert len(packed_examples[0] ) == sum(len(x ) for x in orig_examples )
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
    def __a ( self ) -> Any:
        if not FAIRSEQ_AVAILABLE:
            return
        ds , max_tokens , tokenizer = self._get_dataset(max_len=64 )
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple )
        batch_sizes = [len(x ) for x in batch_sampler]
        assert len(set(batch_sizes ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes ) == len(ds )  # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2 )
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape )
            num_src_per_batch.append(num_src_tokens )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens )
        assert num_src_per_batch[0] == max(num_src_per_batch )
        if failures:
            raise AssertionError(f"too many tokens in {len(failures )} batches" )
    def __a ( self ) -> List[str]:
        ds , _ , tokenizer = self._get_dataset(max_len=512 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k="input_ids" ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k="labels" ) ) < sum(count_pad_tokens(naive_dl , k="labels" ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self , n_obs=1000 , max_len=128 ):
        if os.getenv("USE_REAL_DATA" , False ):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath("train.len" ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path="train" , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
    def __a ( self ) -> Union[str, Any]:
        ds , max_tokens , tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=0 , add_extra_examples=False ) )
        ids2 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=1 , add_extra_examples=False ) )
        assert ids1.intersection(ids2 ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def __a ( self , tok_name ) -> List[str]:
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False )
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
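# Note on the dynamic-batching test above (sketch): make_dynamic_sampler packs
# examples so each batch stays within max_tokens; the assertions check that
# batch sizes actually vary, that no examples are dropped or duplicated, and
# that the first (largest) batch sets the per-batch token ceiling.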
| 22
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCamelCase__ = "pt"
elif is_tf_available():
lowerCamelCase__ = "tf"
else:
lowerCamelCase__ = "jax"
class __magic_name__ (__lowerCAmelCase , unittest.TestCase ):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
def __a ( self ) -> Union[str, Any]:
super().setUp()
lowerCAmelCase_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer( self ) -> ByTaTokenizer:
        return ByTaTokenizer.from_pretrained("google/byt5-small" )
    def get_tokenizer( self , **kwargs ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.ta_base_tokenizer
lowerCAmelCase_ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowerCAmelCase_ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
def __a ( self ) -> int:
lowerCAmelCase_ = self.ta_base_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "Unicode €.</s>" )
        encoded = tokenizer("e è é ê ë" )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "e è é ê ë</s>" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.ta_base_tokenizer
lowerCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCAmelCase_ = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
if FRAMEWORK != "jax":
lowerCAmelCase_ = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __a ( self ) -> int:
lowerCAmelCase_ = self.ta_base_tokenizer
lowerCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , lowerCamelCase__ )
self.assertIn("attention_mask" , lowerCamelCase__ )
self.assertNotIn("decoder_input_ids" , lowerCamelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCamelCase__ )
def __a ( self ) -> str:
lowerCAmelCase_ = self.ta_base_tokenizer
lowerCAmelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ = tokenizer(
text_target=lowerCamelCase__ , max_length=32 , padding="max_length" , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.ta_base_tokenizer
lowerCAmelCase_ = ['''A long paragraph for summarization. </s>''']
lowerCAmelCase_ = ['''Summary of the text. </s>''']
# fmt: off
lowerCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCAmelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCAmelCase_ = tokenizer(lowerCamelCase__ , text_target=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch["input_ids"][0] )
self.assertEqual(lowerCamelCase__ , batch["labels"][0] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
lowerCAmelCase_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
lowerCAmelCase_ = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
shutil.rmtree(lowerCamelCase__ )
lowerCAmelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(["bim", "bambam"] )
lowerCAmelCase_ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCAmelCase_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
lowerCAmelCase_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
lowerCAmelCase_ = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase__ )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCAmelCase_ = json.load(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCAmelCase_ = json.load(lowerCamelCase__ )
lowerCAmelCase_ = [f"<extra_id_{i}>" for i in range(125 )]
lowerCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ = tokenizer_class.from_pretrained(
lowerCamelCase__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=lowerCamelCase__ )]
lowerCAmelCase_ = tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
lowerCAmelCase_ = tokenizer_class.from_pretrained(lowerCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == "" )
def __a ( self ) -> Any:
pass
def __a ( self ) -> Dict:
pass
def __a ( self ) -> Tuple:
pass
def __a ( self ) -> Dict:
pass
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_tokenizers(fast=lowerCamelCase__ , do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowerCAmelCase_ = tokenizer.convert_tokens_to_string(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCAmelCase_ = 0
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(
lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
for attr in attributes_list:
setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [] )
setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
| 356
|
def find_min(arr ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
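# Worked example (illustrative): for [1, 6, 11, 5] the closest partition is
# {1, 5, 6} vs {11}, so find_min returns |12 - 11| = 1.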
| 22
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __magic_name__ (Dataset ):
    def __init__( self , params , data ):
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _a ) -> Optional[Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Union[str, Any]:
return len(self.lengths )
def __a ( self ) -> Optional[Any]:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __a ( self ) -> Optional[int]:
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices )} too long sequences." )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id , sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
def __a ( self ) -> Optional[Any]:
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def __a ( self ) -> Optional[int]:
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase_ = self.params.special_tok_ids["unk_token"]
lowerCAmelCase_ = len(self )
lowerCAmelCase_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase_ = (unk_occs / self.lengths) < 0.5
lowerCAmelCase_ = self.token_ids[indices]
lowerCAmelCase_ = self.lengths[indices]
lowerCAmelCase_ = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def __a ( self ) -> int:
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
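# Splitting note (sketch): remove_long_sequences chops sequences longer than
# max_model_input_size into chunks of at most max_len - 2 tokens, re-attaching
# the cls/bos and sep/eos markers to every chunk so each piece stays
# well-formed before batching.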
| 357
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22
| 0
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
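# Minimal usage sketch (illustrative only; the checkpoint name and image path
# below are assumptions, not verified here):
# from PIL import Image
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=Image.open("photo.jpg"), text="What is shown?", return_tensors="pt")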
| 358
|
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str):
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
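if __name__ == "__main__":
    # Usage sketch: split a summary into one sentence per line, as expected by
    # ROUGE-Lsum scoring (requires `pip install nltk`).
    print(add_newline_to_end_of_each_sentence("Hello there. General Kenobi."))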
| 22
| 0
|
"""simple docstring"""
def euclidean_gcd(a: int, b: int):
    # Iterative Euclidean algorithm.
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int):
    # Recursive variant: gcd(a, b) == gcd(b, a % b).
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}")
    print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}")
    print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}")
    print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}")
    print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}")
    print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}")
    print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}")
    print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}")
    print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}")
    print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}")


if __name__ == "__main__":
    main()
| 359
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22
| 0
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
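# Usage sketch: the terminal cursor stays hidden for the duration of the block
# and is restored afterwards, even if the body raises.
# with hide():
#     run_long_task()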
| 360
|
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    model_type = '''bertabs'''

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 22
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 361
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    original_config = OmegaConf.load(io.BytesIO(r.content))
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 22
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Only the local main process (rank 0) renders the progress bar.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
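# Usage sketch (assumes a distributed run managed by Accelerate): only the local
# main process renders the bar, the other processes pass `disable=True` to tqdm.
# for batch in tqdm(True, dataloader):
#     ...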
| 362
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]):
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]):
    # Binary search for the index of the first negative value in a
    # decreasingly sorted array; returns len(array) if none is negative.
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]):
    # Rows are sorted, so the search bound can only shrink from row to row.
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]):
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]):
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark():
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)", setup=setup, number=500)
        print(F"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = '''detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 363
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
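# Usage sketch (hypothetical `pt_model`/`flax_model` objects): port PyTorch
# weights into the nested Flax parameter dict.
# flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model, init_key=0)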
| 22
| 0
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Pad (or truncate) every sequence to `sequence_length`; a tuple padding
    # value signals 2-D entries such as entity spans.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor) :, :2] = tensor
            else:
                out_tensor[i, sequence_length - len(tensor) :] = tensor
    return out_tensor.tolist()
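# Usage sketch: right-pad two label rows to length 4 with -1, mirroring how the
# collator below pads `ner_tags`.
# padding_tensor([[1, 2], [3]], -1, "right", 4) -> [[1, 2, -1, -1], [3, -1, -1, -1]]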
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None)
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22
| 0
|
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal collecting values into `res`.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in sorted order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 365
|
import math
def perfect_square(num: int):
    # Floating-point check; can misfire for very large integers, so an exact
    # binary-search variant is provided below.
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int):
    # Binary search for an exact integer square root.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
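# Quick sanity check of both implementations: 16 is a perfect square, 10 is not.
# perfect_square(16) -> True, perfect_square_binary_search(16) -> True
# perfect_square(10) -> False, perfect_square_binary_search(10) -> False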
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
| 0
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __magic_name__ :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=16 , _a=2 , _a=4 , _a=4 , _a="relu" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.0 , _a=20 , _a=2 , _a=1 , _a=0 , ) -> str:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = encoder_layerdrop
lowerCAmelCase_ = decoder_layerdrop
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = self.eos_token_id # Eos Token
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase_ = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase_ = self.get_config()
lowerCAmelCase_ = prepare_mam_aaa_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def __a ( self ) -> List[str]:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __a ( self ) -> Tuple:
lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _a , _a ) -> int:
lowerCAmelCase_ = MaMaaaModel(config=_UpperCAmelCase ).get_decoder().to(_UpperCAmelCase ).eval()
lowerCAmelCase_ = inputs_dict["input_ids"]
lowerCAmelCase_ = inputs_dict["attention_mask"]
lowerCAmelCase_ = inputs_dict["head_mask"]
# first forward pass
lowerCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )["last_hidden_state"]
lowerCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[
"last_hidden_state"
]
# select random slice
lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) )
def __a ( self , _a , _a ) -> List[Any]:
lowerCAmelCase_ = MaMaaaModel(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
lowerCAmelCase_ = model(**_UpperCAmelCase )
lowerCAmelCase_ = outputs.encoder_last_hidden_state
lowerCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = model.get_encoder()
encoder.save_pretrained(_UpperCAmelCase )
lowerCAmelCase_ = MaMaaaEncoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
lowerCAmelCase_ = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = model.get_decoder()
decoder.save_pretrained(_UpperCAmelCase )
lowerCAmelCase_ = MaMaaaDecoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
lowerCAmelCase_ = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __magic_name__ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCamelCase__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase__ = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self , _a , _a , _a , _a , _a ) -> List[Any]:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = MaMaaaModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase )
def __a ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ = model_class.from_pretrained(_UpperCAmelCase , output_loading_info=_UpperCAmelCase )
self.assertEqual(info["missing_keys"] , [] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_UpperCAmelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCAmelCase_ = copy.deepcopy(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
if not self.is_encoder_decoder:
lowerCAmelCase_ = inputs["input_ids"]
del inputs["input_ids"]
else:
lowerCAmelCase_ = inputs["input_ids"]
lowerCAmelCase_ = inputs.get("decoder_input_ids" , _UpperCAmelCase )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , _UpperCAmelCase )
lowerCAmelCase_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase_ = wte(_UpperCAmelCase )
else:
lowerCAmelCase_ = wte(_UpperCAmelCase )
lowerCAmelCase_ = wte(_UpperCAmelCase )
with torch.no_grad():
model(**_UpperCAmelCase )[0]
def __a ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = input_dict["input_ids"]
lowerCAmelCase_ = input_ids.ne(1 ).to(_UpperCAmelCase )
lowerCAmelCase_ = MaMaaaForConditionalGeneration(_UpperCAmelCase ).eval().to(_UpperCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
model.generate(num_beams=4 , do_sample=_UpperCAmelCase , early_stopping=_UpperCAmelCase , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
lowerCamelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __magic_name__ (unittest.TestCase ):
@cached_property
def __a ( self ) -> Union[str, Any]:
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(_UpperCAmelCase )
lowerCAmelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCAmelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCAmelCase_ = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ = model(**_UpperCAmelCase )[0]
lowerCAmelCase_ = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
lowerCAmelCase_ = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(_UpperCAmelCase )
# change to intended input
lowerCAmelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCAmelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCAmelCase_ = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ = model(**_UpperCAmelCase )[0]
lowerCAmelCase_ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
lowerCAmelCase_ = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(_UpperCAmelCase )
lowerCAmelCase_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
lowerCAmelCase_ = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase_ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors="pt" )
lowerCAmelCase_ = model.generate(
input_ids=dct["input_ids"].to(_UpperCAmelCase ) , attention_mask=dct["attention_mask"].to(_UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
lowerCAmelCase_ = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
lowerCAmelCase_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
assert generated == expected_en
| 366
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
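# Usage sketch: verify at runtime that the installed `tqdm` satisfies the pin
# recorded in the dependency table.
# dep_version_check("tqdm")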
| 22
| 0
|
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]
def __init__( self , *_a , **_a ) -> Union[str, Any]:
requires_backends(self , ["keras_nlp"] )
| 367
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak the fairseq model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 368
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
lowerCamelCase__ = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 369
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
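# Quick illustrative checks of the 6k +/- 1 trial division above:
# 29 is prime, while 91 = 7 * 13 is caught at i = 7 in the loop.
assert is_prime(29 ) and not is_prime(91 )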
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 200_0000 ):
    return sum(takewhile(lambda x: x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase (BackboneConfigMixin , PretrainedConfig ):
lowerCamelCase__ = '''nat'''
lowerCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _a=4 , _a=3 , _a=64 , _a=[3, 4, 6, 5] , _a=[2, 4, 8, 16] , _a=7 , _a=3.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=0.0_2 , _a=1E-5 , _a=0.0 , _a=None , _a=None , **_a , ) -> str:
        super().__init__(**_a )
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = depths
        lowerCAmelCase_ = len(depths )
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        lowerCAmelCase_ = int(embed_dim * 2 ** (len(depths ) - 1) )
        lowerCAmelCase_ = layer_scale_init_value
        lowerCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        lowerCAmelCase_ = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 370
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __magic_name__ (PretrainedConfig ):
lowerCamelCase__ = '''mobilenet_v2'''
def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = depth_multiplier
lowerCAmelCase_ = depth_divisible_by
lowerCAmelCase_ = min_depth
lowerCAmelCase_ = expand_ratio
lowerCAmelCase_ = output_stride
lowerCAmelCase_ = first_layer_is_expansion
lowerCAmelCase_ = finegrained_output
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = tf_padding
lowerCAmelCase_ = classifier_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = semantic_loss_ignore_index
class __magic_name__ (OnnxConfig ):
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __a ( self ) -> float:
return 1E-4
| 22
| 0
|
def rank_of_matrix(matrix: list[list[int | float]] ):
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
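# Illustrative check: the second row is twice the first, so the rank is 1.
# Note that rank_of_matrix mutates its argument in place.
assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]] ) == 1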
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
from __future__ import annotations
def depth_first_search(graph: dict , start: str ):
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 22
| 0
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.json'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
lowerCamelCase__ = {'''mgp-str''': 27}
class __magic_name__ (PreTrainedTokenizer ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ) -> Union[str, Any]:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def __a ( self ) -> int:
        return len(self.vocab )
    def __a ( self ) -> Any:
        return dict(self.vocab , **self.added_tokens_encoder )
    def __a ( self , text ) -> Optional[Any]:
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def __a ( self , token ) -> int:
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def __a ( self , index ) -> List[Any]:
        return self.decoder.get(index )
    def __a ( self , save_directory , filename_prefix = None ) -> Any:
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        return (vocab_file,)
| 350
|
def pancake_sort(arr: list ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
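# Illustrative run: each pass flips the current maximum to the front, then
# flips it into its final slot at the end of the unsorted prefix.
assert pancake_sort([3, 1, 2] ) == [1, 2, 3]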
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 22
| 0
|
def triangle_number_generator():
for n in range(1 , 100_0000 ):
yield n * (n + 1) // 2
def count_divisors(n: int ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
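# Worked example: 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors
# (1, 2, 4, 7, 14, 28), matching the multiplicity product above.
assert count_divisors(28 ) == 6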
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 351
|
import string
from math import log10
def term_frequency(term: str , document: str ):
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str , corpus: str ):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df: int , n: int , smoothing=False ):
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf(tf: int , idf: int ):
    return round(tf * idf , 3 )
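# Minimal worked example tying the helpers together (illustrative values):
# "coffee" occurs once in the document; 1 of the 2 corpus lines contains it,
# so idf = log10(2 / 1) = 0.301 and tf-idf = 1 * 0.301 = 0.301.
assert term_frequency("coffee" , "I drink coffee." ) == 1
assert document_frequency("coffee" , "I drink coffee.\nTea is fine." ) == (1, 2)
assert tf_idf(1 , inverse_document_frequency(1 , 2 ) ) == 0.301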
| 22
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node :
def __init__( self , _a ) -> Optional[Any]:
lowerCAmelCase_ = data
lowerCAmelCase_ = None
class CircularLinkedList :
def __init__( self ) -> Optional[Any]:
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __iter__( self ) -> List[Any]:
lowerCAmelCase_ = self.head
while self.head:
yield node.data
lowerCAmelCase_ = node.next
if node == self.head:
break
def __len__( self ) -> Dict:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join(str(_a ) for item in iter(self ) )
def __a ( self , _a ) -> Optional[Any]:
self.insert_nth(len(self ) , _a )
def __a ( self , _a ) -> str:
self.insert_nth(0 , _a )
def __a ( self , _a , _a ) -> Optional[int]:
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
lowerCAmelCase_ = Node(_a )
if self.head is None:
lowerCAmelCase_ = new_node # first node points itself
lowerCAmelCase_ = lowerCAmelCase_ = new_node
elif index == 0: # insert at head
lowerCAmelCase_ = self.head
lowerCAmelCase_ = lowerCAmelCase_ = new_node
else:
lowerCAmelCase_ = self.head
for _ in range(index - 1 ):
lowerCAmelCase_ = temp.next
lowerCAmelCase_ = temp.next
lowerCAmelCase_ = new_node
if index == len(self ) - 1: # insert at tail
lowerCAmelCase_ = new_node
def __a ( self ) -> Tuple:
return self.delete_nth(0 )
def __a ( self ) -> List[str]:
return self.delete_nth(len(self ) - 1 )
def __a ( self , _a = 0 ) -> int:
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
lowerCAmelCase_ = self.head
if self.head == self.tail: # just one node
lowerCAmelCase_ = lowerCAmelCase_ = None
elif index == 0: # delete head node
lowerCAmelCase_ = self.tail.next.next
lowerCAmelCase_ = self.head.next
else:
lowerCAmelCase_ = self.head
for _ in range(index - 1 ):
lowerCAmelCase_ = temp.next
lowerCAmelCase_ = temp.next
lowerCAmelCase_ = temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCAmelCase_ = temp
return delete_node.data
def __a ( self ) -> List[str]:
return len(self ) == 0
def A():
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_f1(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , "sklearn" )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), F"Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics(task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(F"Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
| 22
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __magic_name__ (PretrainedConfig ):
lowerCamelCase__ = '''gpt_neo'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , _a=50257 , _a=2048 , _a=2048 , _a=24 , _a=[[["global", "local"], 12]] , _a=16 , _a=None , _a=256 , _a="gelu_new" , _a=0.0 , _a=0.0 , _a=0.0 , _a=0.1 , _a=1E-5 , _a=0.0_2 , _a=True , _a=50256 , _a=50256 , **_a , ) -> Any:
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_layers
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = window_size
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = resid_dropout
lowerCAmelCase_ = embed_dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = classifier_dropout
lowerCAmelCase_ = layer_norm_epsilon
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = attention_types
        lowerCAmelCase_ = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
@staticmethod
def __a ( _a ) -> Optional[Any]:
lowerCAmelCase_ = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold(input , dimension , size , step ):
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="floor" )
class __magic_name__ (OnnxConfigWithPast ):
@property
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_a , direction="inputs" )
lowerCAmelCase_ = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase_ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __a ( self ) -> List[Any]:
return self._config.num_heads
def __a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Dict:
        lowerCAmelCase_ = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            _a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase_ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase_ = seqlen + 2
lowerCAmelCase_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
lowerCAmelCase_ = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase_ = ordered_inputs["attention_mask"].dtype
lowerCAmelCase_ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_a , _a , dtype=_a )] , dim=1 )
return ordered_inputs
@property
def __a ( self ) -> str:
return 13
| 353
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (ProcessorMixin ):
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Tuple:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , visual_prompt=None , return_tensors=None , **kwargs ) -> Dict:
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def __a ( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def __a ( self , *args , **kwargs ) -> Optional[int]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def __a ( self ) -> List[str]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def __a ( self ) -> Optional[Any]:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 22
| 0
|
def solution(n: int = 200_0000 ):
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
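# Sanity check (illustrative): the primes below 10 are 2, 3, 5 and 7,
# which sum to 17.
assert solution(10 ) == 17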
if __name__ == "__main__":
print(F'''{solution() = }''')
| 354
|
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowerCamelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
def __a ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def __a ( self , _a , _a ) -> List[str]:
return {"accuracy": simple_accuracy(_a , _a )}
| 22
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> Dict:
lowerCAmelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
lowerCAmelCase_ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCAmelCase_ = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def __a ( self , **_a ) -> str:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def __a ( self , **_a ) -> Any:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def __a ( self , **_a ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def __a ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __a ( self ) -> str:
        lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        lowerCAmelCase_ = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase_ = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(lowercase_ , return_tensors="np" )
lowerCAmelCase_ = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = processor(text=lowercase_ )
lowerCAmelCase_ = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def __a ( self ) -> str:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ = processor.batch_decode(lowercase_ )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
| 355
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path , data: list ):
    content = "\n".join(data )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir(tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F"{split}.source" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F"{split}.target" ) , SUMMARIES )
    return tmp_dir
class __magic_name__ (TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self , _a ) -> str:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCAmelCase_ = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 22
| 0
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self ) -> Union[str, Any]:
lowerCAmelCase_ = ""
lowerCAmelCase_ = ""
lowerCAmelCase_ = []
lowerCAmelCase_ = 0
lowerCAmelCase_ = 256
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
def __a ( self , _a ) -> Tuple:
        lowerCAmelCase_ = cva.imread(_a , 0 )
lowerCAmelCase_ = copy.deepcopy(self.img )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
lowerCAmelCase_ = np.sum(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
lowerCAmelCase_ = x[i] / self.k
self.sk += prk
lowerCAmelCase_ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase_ = int(last % last )
lowerCAmelCase_ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCamelCase__ )
lowerCAmelCase_ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase_ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase_ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase_ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def __a ( self ) -> Optional[Any]:
plt.hist(self.img.ravel() , 256 , [0, 256] )
def __a ( self ) -> Any:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCamelCase__ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowerCamelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 356
|
def find_min(arr: list ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
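# Worked example: for [1, 6, 11, 5] the closest split is {1, 5, 6} vs {11},
# so the minimum subset-sum difference is |12 - 11| = 1.
assert find_min([1, 6, 11, 5] ) == 1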
| 22
| 0
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_60_47
RO_CODE = 25_61_45
@require_sentencepiece
@require_tokenizers
class __magic_name__ (TokenizerTesterMixin , unittest.TestCase ):
lowerCamelCase__ = NllbTokenizer
lowerCamelCase__ = NllbTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = {}
def __a ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        lowerCAmelCase_ = NllbTokenizer(SAMPLE_VOCAB , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> List[Any]:
        lowerCAmelCase_ = NllbTokenizer(SAMPLE_VOCAB , keep_accents=lowercase_ )
lowerCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
lowerCAmelCase_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
    @require_torch
    def __a ( self ) -> Any:
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def __a ( self ) -> int:
pass
    def __a ( self ) -> Optional[int]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True, )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ (unittest.TestCase ):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
    @classmethod
    def setUpClass(cls) -> Optional[Any]:
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn")
        cls.pad_token_id = 1
        return cls
def __a ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 256057 )
    def __a ( self ) -> Optional[Any]:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def __a ( self ) -> List[Any]:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def __a ( self ) -> int:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
def __a ( self ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [256203, 3] )
    def __a ( self ) -> Any:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def __a ( self ) -> str:
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"])
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # decoder starts with the target language code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def __a ( self ) -> Optional[Any]:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def __a ( self ) -> Dict:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            nested_simplify(inputs), {
                # eng_Latn, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            }, )
    @require_torch
    def __a ( self ) -> Union[str, Any]:
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
| 357
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(F"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22
| 0
|
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
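    # Why 6k +/- 1: every integer can be written as 6k + r with r in {0,...,5};
    # r in {0, 2, 4} gives an even number and r == 3 a multiple of 3, so any
    # prime above 3 must leave remainder 1 or 5 when divided by 6.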
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 358
|
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(__a: str ):
    text = re.sub("<n>", "", __a)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text))
| 22
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __magic_name__ (metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        requires_backends(self , ["speech"] )
class __magic_name__ (metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ) -> int:
        requires_backends(self , ["speech"] )
| 359
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22
| 0
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
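# The sequence terms are stored as little-endian digit lists: a_i[j] is the
# coefficient of 10**j, so the integer value is sum(a_i[j] * 10**j).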
def next_term(a_i, k, i, n):
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 360
|
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (PretrainedConfig ):
    model_type = "bertabs"
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 22
| 0
|
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
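# Example: all three implementations agree, e.g. sum_of_digits(262144) == 19
# because 2 + 6 + 2 + 1 + 4 + 4 = 19.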
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 361
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str, ):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 22
| 0
|
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 362
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
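# Example: find_negative_index([4, 2, 0, -1, -3]) == 3, the index of the first
# negative value in the descending-sorted row.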
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22
| 0
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
lowerCamelCase__ = {"""a""", """b""", """c""", """d""", """e"""}
lowerCamelCase__ = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
| 363
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
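# Example: rename_key("encoder.layers.0.weight") -> "encoder.layers_0.weight",
# turning PyTorch's "module.index" naming into Flax's "module_index" naming.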
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 22
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __magic_name__ (PretrainedConfig ):
    model_type = "distilbert"
    attribute_map = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
    def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> Tuple:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class __magic_name__ (OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22
| 0
|
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
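        # With c = n - a - b, a**2 + b**2 = (n - a - b)**2 expands to
        # 2*b*(n - a) = n**2 - 2*a*n, hence b = (n*n - 2*a*n) // (2*n - 2*a).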
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
return product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 365
|
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num
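# Note: the float check above can misjudge very large integers because
# math.sqrt rounds to the nearest double; the binary-search version below uses
# exact integer arithmetic instead.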
def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
| 0
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 366
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 22
| 0
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
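# Example: normalize_answer("The  Cat!") -> "cat" (lowercased, punctuation and
# the article removed, whitespace collapsed).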
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
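# Worked example: gold "the cat sat" normalizes to ["cat", "sat"] and
# prediction "cat sat down" to ["cat", "sat", "down"]; num_same = 2,
# precision = 2/3, recall = 2/2, so f1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.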
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 367
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
SAMPLE_TEXT = " Hello world! cécé herlolip"
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
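# The linear layer above shares its weight tensor with the embedding matrix,
# which is how the converted model ties its LM head to the token embeddings.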
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22
| 0
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
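# Illustrative sanity check (not part of the original module): by construction the
# density terms satisfy E(0) = 1, so at redshift zero the function should return
# the Hubble constant itself.
import math
assert math.isclose(hubble_parameter(68.3, 1e-4, 0.3, 0.7, 0), 68.3)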
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 368
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 22
| 0
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input", )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it")
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`.")
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
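# Illustrative usage (commented out; the model name below is only an example and
# requires downloading weights, it is not part of this module):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)
#   # -> list of dicts with "score", "token", "token_str" and "sequence" keys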
| 369
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
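# Illustrative check (not part of the original solution): the primes below ten
# are 2, 3, 5 and 7, which sum to 17.
assert solution(10) == 17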
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22
| 0
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"
    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig
    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)
    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token states with the attention mask before projecting
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
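# Illustrative usage sketch (commented out; the checkpoint name is an assumption
# and not part of this file):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tokenizer(["une photo d'un chat"], padding=True, return_tensors="pt")
#   text_embedding, token_states = model(batch["input_ids"], batch["attention_mask"])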
| 370
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetVaConfig(PretrainedConfig):
    model_type = "mobilenet_v2"
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetVaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
    def atol_for_validation(self) -> float:
return 1E-4
| 22
| 0
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 371
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
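# For contrast with the comments above, a breadth-first variant (an illustrative
# sketch, not part of the original module) pops from the front of the frontier
# instead of the back:
from collections import deque
def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = set(), deque([start])
    while queue:
        v = queue.popleft()  # 1) pop first element instead of last one
        explored.add(v)
        for adj in graph[v]:  # 2) enqueue neighbours that are not yet explored
            if adj not in explored and adj not in queue:
                queue.append(adj)
    return explored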
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 22
| 0
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input: str, model, tokenizer, topk: int = 5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 350
|
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
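# Illustrative check (not part of the original module):
assert pancake_sort([3, 1, 5, 2, 4]) == [1, 2, 3, 4, 5]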
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 22
| 0
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None
    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())
    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 351
|
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
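# Worked example (illustrative, not part of the original module): with a corpus
# of three newline-separated documents in which "dog" appears in one document,
# idf = round(log10(3 / 1), 3) = 0.477, and a term frequency of 2 gives
# tf-idf = round(2 * 0.477, 3) = 0.954.
example_corpus = "the cat sat\nthe dog ran\nthe cat slept"
assert document_frequency("dog", example_corpus) == (1, 3)
assert inverse_document_frequency(1, 3) == 0.477
assert tf_idf(2, 0.477) == 0.954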
| 22
| 0
|
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 352
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
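# Illustrative usage (commented out so sklearn is not required at import time;
# the values are just an example):
#
#   import numpy as np
#
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}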
| 22
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 353
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
@property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
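# Illustrative usage sketch (commented out; the checkpoint name is an assumption
# and not part of this module):
#
#   from PIL import Image
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   image = Image.new("RGB", (352, 352))
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#   # inputs contains "input_ids", "attention_mask" and "pixel_values"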
| 22
| 0
|
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 354
|
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 22
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 355
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCAmelCase_ = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 22
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
    def __init__(
        self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def __a ( self ) -> List[Any]:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def __a ( self ) -> List[Any]:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def __a ( self ) -> Optional[int]:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def __a ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def __a ( self ) -> Tuple:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> List[str]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size), device=torch_device),
            'mask_labels': torch.randn((2, 10, *size), device=torch_device),
            'class_labels': torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class __magic_name__ (unittest.TestCase ):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __a ( self ) -> Tuple:
lowerCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_A )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_A , return_tensors="pt" ).to(_A )
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(inputs_shape , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase_ = model(**_A )
lowerCAmelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
lowerCAmelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
lowerCAmelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval()
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_A , return_tensors="pt" ).to(_A )
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(inputs_shape , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase_ = model(**_A )
# masks_queries_logits
lowerCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
lowerCAmelCase_ = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
lowerCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def __a ( self ) -> int:
lowerCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval()
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
lowerCAmelCase_ = inputs['pixel_values'].to(_A )
lowerCAmelCase_ = [el.to(_A ) for el in inputs['mask_labels']]
lowerCAmelCase_ = [el.to(_A ) for el in inputs['class_labels']]
with torch.no_grad():
lowerCAmelCase_ = model(**_A )
self.assertTrue(outputs.loss is not None )
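# Editor's sketch (not part of the original tests): minimal inference with the
# checkpoint the integration tests above exercise, using the same class names
# the snippet uses and assuming the usual `transformers` from_pretrained API.
import torch

def run_mask2former_inference(image):
    processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance").eval()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # class_queries_logits: (batch, num_queries, num_labels + 1)
    # masks_queries_logits: (batch, num_queries, height / 4, width / 4)
    return outputs.class_queries_logits, outputs.masks_queries_logits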
| 356
|
def find_min(arr: list[int]) -> int:
    """Partition ``arr`` into two subsets whose sums are as close as possible
    and return the minimum difference between the two subset sums."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True  # a sum of 0 is always achievable (empty subset)
    for i in range(1, s + 1):
        dp[0][i] = False  # no positive sum is achievable with zero elements
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    diff = s  # fallback if the loop below never breaks (e.g. empty input)
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
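# Editor's example (not in the original snippet): a quick sanity check of the
# partition routine above.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1  # {1, 5, 6} vs {11} -> |12 - 11| = 1
    assert find_min([3, 1, 4, 2, 2, 1]) == 1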
| 22
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
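# Editor's note (not part of the original test): the `@@` suffix marks a
# non-final BPE piece, so "react" segments as re@@ a@@ c@@ t -- the merges
# above only cover "adapt"/"apt"; everything else falls back to characters.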
| 357
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(model_card_dir, exist_ok=True)
path = os.path.join(model_card_dir, "README.md")
print(F"Generating {path}")
with open(path, "w", encoding="utf-8") as f:
    f.write(lowerCAmelCase_)  # the model card text assembled above
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''')
lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
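# Editor's note: this is a standalone card generator; per the usage comment at
# the top of the snippet it is run directly, e.g.
#   ./gen-card-facebook-wmt19.py
# (invoked from the repo root), and it writes
# model_cards/facebook/wmt19-{ru-en,en-ru,en-de,de-en}/README.md.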
| 22
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["DPTFeatureExtractor"]
lowerCamelCase__ = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
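# Editor's sketch (illustrative, not the transformers implementation): the init
# file above defers heavy imports until first attribute access. A minimal
# version of the same lazy-module idea:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Only called for attributes not found normally: look the name up in
        # the import map, import the owning submodule, and forward the access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")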
| 358
|
import re
from filelock import FileLock
try:
import nltk
NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (the original discarded this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 22
| 0
|
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
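# Editor's example (names as restored above): 100 VA of apparent power at a
# power factor of 0.8 splits into 80 W real and 60 var reactive power.
assert round(real_power(100, 0.8), 10) == 80.0
assert round(reactive_power(100, 0.8), 10) == 60.0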
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22
| 0
|
from torch import nn
def get_activation(act_fn: str):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
| 360
|
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (PretrainedConfig ):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512,
                 enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6,
                 dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 22
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
return config
def rename_key(name):
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
lowerCAmelCase_ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase_ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase_ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase_ = name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
lowerCAmelCase_ = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
lowerCAmelCase_ = name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
lowerCAmelCase_ = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
lowerCAmelCase_ = name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
lowerCAmelCase_ = name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
lowerCAmelCase_ = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
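# Editor's example: a few sample mappings produced by rename_key above:
#   "ln_1.weight" -> "layer_norm1.weight"
#   "visual.transformer.resblocks.0.attn.out_proj.weight"
#     -> "vision_model.encoder.layers.0.self_attn.out_proj.weight"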
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith("mit" ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
    layer_num = key_split[2]
    dim = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
new_key_name = rename_key(key)
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
    val = val.T
orig_state_dict[new_key_name] = val
return orig_state_dict
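# Editor's note: the chunking in the branches above splits fused attention
# weights (an in_proj matrix of shape (3 * dim, dim)) into separate q/k/v
# projections: q = W[:dim], k = W[dim : 2 * dim], v = W[2 * dim:].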
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
checkpoint_url = model_to_url[model_name]
num_frames = 8
if "16-frames" in model_name:
    num_frames = 16
elif "shot" in model_name:
    num_frames = 32
config = get_xclip_config(model_name, num_frames)
model = XCLIPModel(config)
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
lowerCAmelCase_ = torch.load(__snake_case , map_location="cpu" )["model"]
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(__snake_case )["model"]
lowerCAmelCase_ = convert_state_dict(__snake_case , __snake_case )
lowerCAmelCase_ = XCLIPModel(__snake_case )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(__snake_case , strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
image_processor = VideoMAEImageProcessor(size=size)
slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)
video = prepare_video(num_frames)
inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
)
print("Shape of pixel values:", inputs.pixel_values.shape)
with torch.no_grad():
    outputs = model(**inputs)
# Verify outputs
logits_per_video = outputs.logits_per_video
probs = logits_per_video.softmax(dim=1)
print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(__snake_case , __snake_case , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
processor.push_to_hub(__snake_case , organization="nielsr" )
slow_tokenizer.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
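# Editor's example invocation (script filename hypothetical; the flags are the
# ones declared by the argument parser above):
#   python convert_x_clip_original_pytorch_checkpoint_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32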
| 361
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(num_up_blocks):
    block_id = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to save the converted VAE to.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 22
| 0
|
from collections import deque
from math import floor
from random import random
from time import time
class __magic_name__ :
def __init__( self ) -> List[str]:
lowerCAmelCase_ = {}
def __a ( self , _a , _a , _a=1 ) -> List[Any]:
if self.graph.get(UpperCamelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCAmelCase_ = [[w, v]]
if not self.graph.get(UpperCamelCase__ ):
lowerCAmelCase_ = []
def __a ( self ) -> Dict:
return list(self.graph )
def __a ( self , _a , _a ) -> Optional[int]:
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
def __a ( self , _a=-2 , _a=-1 ) -> List[Any]:
if s == d:
return []
lowerCAmelCase_ = []
lowerCAmelCase_ = []
if s == -2:
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def __a ( self , _a=-1 ) -> str:
if c == -1:
lowerCAmelCase_ = floor(random() * 10000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase_ = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def __a ( self , _a=-2 ) -> int:
lowerCAmelCase_ = deque()
lowerCAmelCase_ = []
if s == -2:
lowerCAmelCase_ = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
lowerCAmelCase_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self , _a ) -> str:
lowerCAmelCase_ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __a ( self , _a ) -> Any:
return len(self.graph[u] )
def __a ( self , _a=-2 ) -> Optional[Any]:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
if s == -2:
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = s
lowerCAmelCase_ = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return sorted_nodes
def __a ( self ) -> List[str]:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = -2
lowerCAmelCase_ = []
lowerCAmelCase_ = s
lowerCAmelCase_ = False
lowerCAmelCase_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCAmelCase_ = s
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = -2
lowerCAmelCase_ = []
lowerCAmelCase_ = s
lowerCAmelCase_ = False
lowerCAmelCase_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCAmelCase_ = s
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def __a ( self , _a=-2 , _a=-1 ) -> str:
lowerCAmelCase_ = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
lowerCAmelCase_ = time()
return end - begin
def __a ( self , _a=-2 ) -> int:
lowerCAmelCase_ = time()
self.bfs(UpperCamelCase__ )
lowerCAmelCase_ = time()
return end - begin
class __magic_name__ :
def __init__( self ) -> str:
lowerCAmelCase_ = {}
def __a ( self , _a , _a , _a=1 ) -> Tuple:
# check if the u exists
if self.graph.get(UpperCamelCase__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCAmelCase_ = [[w, v]]
# add the other way
if self.graph.get(UpperCamelCase__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
lowerCAmelCase_ = [[w, u]]
def __a ( self , _a , _a ) -> int:
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
# the other way round
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase__ )
def __a ( self , _a=-2 , _a=-1 ) -> Optional[int]:
if s == d:
return []
lowerCAmelCase_ = []
lowerCAmelCase_ = []
if s == -2:
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def __a ( self , _a=-1 ) -> Optional[Any]:
if c == -1:
lowerCAmelCase_ = floor(random() * 10000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase_ = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def __a ( self , _a=-2 ) -> Optional[Any]:
lowerCAmelCase_ = deque()
lowerCAmelCase_ = []
if s == -2:
lowerCAmelCase_ = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
lowerCAmelCase_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self , _a ) -> Union[str, Any]:
return len(self.graph[u] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = -2
lowerCAmelCase_ = []
lowerCAmelCase_ = s
lowerCAmelCase_ = False
lowerCAmelCase_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCAmelCase_ = s
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def __a ( self ) -> str:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCAmelCase_ = -2
lowerCAmelCase_ = []
lowerCAmelCase_ = s
lowerCAmelCase_ = False
lowerCAmelCase_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCAmelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCAmelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCAmelCase_ = s
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def __a ( self ) -> List[Any]:
return list(self.graph )
def __a ( self , _a=-2 , _a=-1 ) -> str:
lowerCAmelCase_ = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
lowerCAmelCase_ = time()
return end - begin
def __a ( self , _a=-2 ) -> List[Any]:
lowerCAmelCase_ = time()
self.bfs(UpperCamelCase__ )
lowerCAmelCase_ = time()
return end - begin
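# Editor's sketch: a compact, self-contained version of the back-edge cycle
# detection the classes above implement with an explicit stack (names here are
# illustrative, not from the original snippet).
def has_cycle(graph: dict[int, list[int]]) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on current DFS path / done
    color = {u: WHITE for u in graph}

    def visit(u: int) -> bool:
        color[u] = GRAY
        for v in graph.get(u, []):
            if color.get(v, WHITE) == GRAY:
                return True  # back edge to the current path -> cycle
            if color.get(v, WHITE) == WHITE and visit(v):
                return True
        color[u] = BLACK
        return False

    return any(color[u] == WHITE and visit(u) for u in graph)

assert has_cycle({1: [2], 2: [3], 3: [1]})
assert not has_cycle({1: [2], 2: [3], 3: []})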
| 362
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
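# Editor's note: the binary-search variant runs one search per row over a
# shrinking prefix (each row's `bound` is at most the previous row's), so it is
# O(m log n) versus O(m * n) for the brute-force scans, which matches the
# relative timings recorded in the comments above.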
| 22
| 0
|