from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Writes a basic cluster config for the local machine and returns its path, or False if one already exists."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with its Maclaurin series:
    sin(theta) = sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r+1) / (2r+1)!
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into (-2*pi, 2*pi) so the truncated series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with its Maclaurin series:
    cos(theta) = sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r) / (2r)!
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
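
    # Quick sanity check (added illustration): with the default 30 terms, the
    # truncated series should agree with math.sin / math.cos to high precision.
    from math import cos, sin

    assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-10
    assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-10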
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve a strictly diagonally dominant linear system with the Jacobi iteration method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b]
    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise unless each diagonal entry strictly dominates the sum of the rest of its row."""
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
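
    # Worked example (added illustration): solve a small strictly diagonally
    # dominant system; the iterates approach the true solution (1/11, 7/11).
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5], 10))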
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random channel-first uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node, i.e. the list contains a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
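

# A minimal usage sketch (added illustration). This module uses relative imports, so the
# demo is written against the public `diffusers` namespace; the default config above has a
# single encoder/decoder block and therefore no spatial downsampling, so shapes round-trip:
#
#     import torch
#     from diffusers import VQModel
#
#     vq = VQModel()
#     image = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         reconstruction = vq(image).sample
#     print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])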
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
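

# A minimal usage sketch (added illustration; it mirrors Example 1 in the docstring and
# assumes `datasets.load_metric` can fetch the canonical `google_bleu` metric script).
# With an identical hypothesis and reference, every n-gram matches, so GLEU is 1.0:
if __name__ == "__main__":
    hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
    reference = ["the", "cat", "sat", "on", "the", "mat"]
    google_bleu = datasets.load_metric("google_bleu")
    result = google_bleu.compute(predictions=[hypothesis], references=[[reference]])
    print(result)  # {'google_bleu': 1.0}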
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Counts the prize strings for a study period of `days` days (Project Euler 191)."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
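
    # Worked check (added illustration): for a 4-day period there are 3^4 = 81
    # trinary strings, of which exactly 43 avoid a second absence and a run of
    # three consecutive lates.
    print(solution(4))  # 43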
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function so that it lower-cases its result and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
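# Illustration of the ordering sort_objects produces (hypothetical object names):
# constants first, then classes, then functions, each group sorted
# alphabetically with underscores ignored:
#
#   sort_objects(["z_func", "AConfig", "B_CONST", "a_func", "ZModel"])
#   # -> ["B_CONST", "AConfig", "ZModel", "a_func", "z_func"]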
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
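# Typical usage, a sketch assuming this script lives in the repo's utils/ folder
# (the file name below is an assumption):
#   python utils/custom_init_isort.py              # rewrites the __init__.py files in place
#   python utils/custom_init_isort.py --check_only # only checks, raising if files would change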
| 8
| 1
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Dict = ComputeEnvironment.AMAZON_SAGEMAKER
UpperCamelCase__ : int = True
UpperCamelCase__ : Dict = '''ml.p3.2xlarge'''
UpperCamelCase__ : List[Any] = '''accelerate_sagemaker_execution_role'''
UpperCamelCase__ : int = '''hf-sm'''
UpperCamelCase__ : List[Any] = '''us-east-1'''
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : List[str] = '''accelerate-sagemaker-1'''
UpperCamelCase__ : Optional[int] = '''1.6'''
UpperCamelCase__ : List[str] = '''4.4'''
UpperCamelCase__ : List[str] = '''train.py'''
UpperCamelCase__ : Tuple = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
UpperCamelCase__ : Optional[int] = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # Every value is passed as a string on the command line; the conversion
        # should recover the native Python types.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
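# For reference, a sketch of the dict the successful conversion above is expected
# to produce (types inferred by _convert_nargs_to_dict from the string values):
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-05, "max_steps": 50.5}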
| 60
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
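    # Note on the expected tokens above: the fixture uses SentencePiece byte
    # fallback, so characters missing from the vocab are split into raw UTF-8
    # bytes -- "é" becomes "<0xC3>", "<0xA9>" and the digit "9" becomes "<0x39>".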
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
                [63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994],
                [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "token_type_ids": [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "attention_mask": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 60
| 1
|
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so the next test does not include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))

        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 490
|
"""simple docstring"""
def lowercase ( __UpperCamelCase = 10**12 ) -> int:
__magic_name__ = 1
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
    print(f"""{solution() = }""")
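# Quick sanity check of the recurrence (values from the problem statement): the
# arrangement after (15 blue, 21 total) is (85 blue, 120 total), so
# solution(21) == 85.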
| 490
| 1
|
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Return `data` rescaled (min-max normalized) to the [0, 1] interval."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Return `data` standardized to zero mean and unit sample variance (z-scores)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
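if __name__ == "__main__":
    # A small illustrative run (made-up data): rescaling maps the minimum to 0.0
    # and the maximum to 1.0, while standardization yields z-scores.
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))     # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))   # [-1.162, -0.387, 0.387, 1.162]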
| 325
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
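# Example of the renaming above on one hypothetical checkpoint key:
#   rename_key("layers.0.residual_group.blocks.1.attn.proj.weight", config)
#   # -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"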
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Fused qkv projections are split into separate query/key/value tensors.
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")
    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 325
| 1
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 47
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    # override to speed the overall test timing up
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    # override to speed the overall test timing up
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
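# The fast tests above run on CPU with tiny dummy components; the @slow /
# @require_torch_gpu class only runs when slow tests are enabled (e.g. with
# RUN_SLOW=1 in the environment) on a CUDA machine.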
| 449
| 0
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a `Version` object) to some requirement using a given operation string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed PyTorch version to a reference version with an operation."""
    return compare_versions(torch_version, operation, version)
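# Usage sketch:
#   is_torch_version(">=", "1.12.0")         # True when torch 1.12 or newer is installed
#   compare_versions("numpy", "<", "2.0.0")  # compares the installed numpy version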
| 706
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape, as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : str = ASTFeatureExtractor
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Tuple = ASTFeatureExtractionTester(self)
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__ : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
a__ : Optional[Any] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__ : str = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__ : Optional[int] = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__ : int = feat_extract(lowercase , padding=lowercase , return_tensors='np').input_values
a__ : Any = feat_extract(lowercase , padding=lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__ : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
a__ : Optional[Any] = np.asarray(lowercase)
a__ : Any = feat_extract(lowercase , return_tensors='np').input_values
a__ : Optional[int] = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
@require_torch
def __lowercase ( self) -> int:
'''simple docstring'''
import torch
a__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__ : Optional[Any] = np.random.rand(100).astype(np.floataa)
a__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__ : Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def __lowercase ( self , lowercase) -> Optional[int]:
'''simple docstring'''
from datasets import load_dataset
a__ : Tuple = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
a__ : List[str] = ds.sort('id').select(range(lowercase))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __lowercase ( self) -> Tuple:
'''simple docstring'''
        # fmt: off
        a__ : Optional[int] = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69])
# fmt: on
a__ : Any = self._load_datasamples(1)
a__ : Any = ASTFeatureExtractor()
a__ : int = feature_extractor(lowercase , return_tensors='pt').input_values
        self.assertEqual(input_values.shape , (1, 1024, 128))
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowercase , atol=1e-4))
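# A minimal standalone usage sketch of ASTFeatureExtractor, mirroring what the
# tests above exercise (the extractor and numpy are imported at module level).
if __name__ == "__main__":
    extractor = ASTFeatureExtractor()
    waveform = np.random.rand(16000).astype(np.float32)  # ~1 s of fake 16 kHz audio
    features = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(features.input_values.shape)  # (1, 1024, 128): time frames x mel bins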
| 392
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''roformer'''
def __init__( self ,lowerCamelCase_=50000 ,lowerCamelCase_=None ,lowerCamelCase_=768 ,lowerCamelCase_=12 ,lowerCamelCase_=12 ,lowerCamelCase_=3072 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=1536 ,lowerCamelCase_=2 ,lowerCamelCase_=0.02 ,lowerCamelCase_=1e-12 ,lowerCamelCase_=0 ,lowerCamelCase_=False ,lowerCamelCase_=True ,**lowerCamelCase_ ,) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size if embedding_size is None else embedding_size
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : str = type_vocab_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : str = rotary_value
UpperCAmelCase__ : Dict = use_cache
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase__ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase__ : List[str] = {0: '''batch''', 1: '''sequence'''}
UpperCAmelCase__ : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
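# A minimal instantiation sketch (illustrative; assumes the released
# transformers RoFormerConfig, which the class above mirrors).
if __name__ == "__main__":
    from transformers import RoFormerConfig

    config = RoFormerConfig(rotary_value=False)
    print(config.vocab_size, config.max_position_embeddings)  # 50000, 1536 by default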
| 614
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : str = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 614
| 1
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # swap adjacent out-of-order elements (tuple target restored)
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # one extra pass per recursion; stop early once a pass makes no swaps
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
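    # quick demo alongside the doctests
    print(bubble_sort([5, 2, 8, 1, 3]))  # -> [1, 2, 3, 5, 8]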
| 711
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664
| 0
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 285
|
from typing import Any
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
_validation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
# Creates data structures and fill initial step
UpperCamelCase__ : dict = {}
UpperCamelCase__ : dict = {}
for state in states_space:
UpperCamelCase__ : Optional[int] = observations_space[0]
UpperCamelCase__ : Any = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
UpperCamelCase__ : Union[str, Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(UpperCamelCase__ ) ):
UpperCamelCase__ : str = observations_space[o]
UpperCamelCase__ : Union[str, Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
UpperCamelCase__ : int = ''''''
UpperCamelCase__ : List[str] = -1
for k_state in states_space:
UpperCamelCase__ : Union[str, Any] = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
UpperCamelCase__ : Tuple = probability
UpperCamelCase__ : Union[str, Any] = k_state
# Update probabilities and pointers dicts
UpperCamelCase__ : Tuple = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
UpperCamelCase__ : Optional[Any] = arg_max
# The final observation
UpperCamelCase__ : List[str] = observations_space[len(UpperCamelCase__ ) - 1]
# argmax for given final observation
UpperCamelCase__ : Dict = ''''''
UpperCamelCase__ : Tuple = -1
for k_state in states_space:
UpperCamelCase__ : Any = probabilities[(k_state, final_observation)]
if probability > max_probability:
UpperCamelCase__ : List[str] = probability
UpperCamelCase__ : Tuple = k_state
UpperCamelCase__ : Any = arg_max
# Process pointers backwards
UpperCamelCase__ : List[Any] = last_state
UpperCamelCase__ : int = []
for o in range(len(UpperCamelCase__ ) - 1 , -1 , -1 ):
result.append(UpperCamelCase__ )
UpperCamelCase__ : int = pointers[previous, observations_space[o]]
result.reverse()
return result
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
_validate_not_empty(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
_validate_lists(UpperCamelCase__ , UpperCamelCase__ )
_validate_dicts(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
_validate_list(UpperCamelCase__ , '''observations_space''' )
_validate_list(UpperCamelCase__ , '''states_space''' )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
if not isinstance(_object , UpperCamelCase__ ):
UpperCamelCase__ : List[Any] = f'''{var_name} must be a list'''
raise ValueError(UpperCamelCase__ )
else:
for x in _object:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : List[Any] = f'''{var_name} must be a list of strings'''
raise ValueError(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
_validate_dict(UpperCamelCase__ , '''initial_probabilities''' , UpperCamelCase__ )
_validate_nested_dict(UpperCamelCase__ , '''transition_probabilities''' )
_validate_nested_dict(UpperCamelCase__ , '''emission_probabilities''' )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
_validate_dict(_object , UpperCamelCase__ , UpperCamelCase__ )
for x in _object.values():
_validate_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ):
if not isinstance(_object , UpperCamelCase__ ):
UpperCamelCase__ : List[str] = f'''{var_name} must be a dict'''
raise ValueError(UpperCamelCase__ )
if not all(isinstance(UpperCamelCase__ , UpperCamelCase__ ) for x in _object ):
UpperCamelCase__ : Dict = f'''{var_name} all keys must be strings'''
raise ValueError(UpperCamelCase__ )
if not all(isinstance(UpperCamelCase__ , UpperCamelCase__ ) for x in _object.values() ):
UpperCamelCase__ : Optional[Any] = '''nested dictionary ''' if nested else ''''''
UpperCamelCase__ : Optional[Any] = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
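# A worked toy example for the Viterbi routine above (the first top-level
# function), using the classic healthy/fever HMM; the names below are
# illustrative. For these inputs the most-likely state sequence is
# ['healthy', 'healthy', 'fever'].
#
# observations = ["normal", "cold", "dizzy"]
# states = ["healthy", "fever"]
# initial = {"healthy": 0.6, "fever": 0.4}
# transition = {"healthy": {"healthy": 0.7, "fever": 0.3},
#               "fever": {"healthy": 0.4, "fever": 0.6}}
# emission = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}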
| 285
| 1
|
import math
class snake_case_ :
'''simple docstring'''
def __init__( self : int , __magic_name__ : Dict=0 ) -> Union[str, Any]: # a graph with Node 0,1,...,N-1
lowerCamelCase_ : Dict = n
lowerCamelCase_ : Union[str, Any] = [
[math.inf for j in range(0 , __magic_name__ )] for i in range(0 , __magic_name__ )
] # adjacency matrix for weight
lowerCamelCase_ : Tuple = [
[math.inf for j in range(0 , __magic_name__ )] for i in range(0 , __magic_name__ )
] # dp[i][j] stores minimum distance from i to j
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple ) -> List[Any]:
lowerCamelCase_ : Optional[int] = w
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowerCamelCase_ : Optional[int] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : str , __magic_name__ : List[Any] ) -> Optional[Any]:
return self.dp[u][v]
if __name__ == "__main__":
snake_case_ : Union[str, Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
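# For reference, a compact standalone version of the relaxation implemented by
# floyd_warshall() above (illustrative; `weights` is an n x n adjacency matrix
# with math.inf for missing edges — `math` is already imported above).
def floyd_warshall_reference(weights: list) -> list:
    n = len(weights)
    dist = [row[:] for row in weights]  # copy so the input is not mutated
    for k in range(n):
        for i in range(n):
            for j in range(n):
                # relax the path i -> k -> j
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist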
| 706
|
from collections.abc import Generator
from math import sin
def __a ( __UpperCAmelCase : bytes ) -> bytes:
"""simple docstring"""
if len(__UpperCAmelCase ) != 32:
raise ValueError("Input must be of length 32" )
lowerCamelCase_ : Optional[Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __a ( __UpperCAmelCase : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
lowerCamelCase_ : Tuple = format(__UpperCAmelCase , "08x" )[-8:]
lowerCamelCase_ : int = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def __a ( __UpperCAmelCase : bytes ) -> bytes:
"""simple docstring"""
lowerCamelCase_ : int = b""
for char in message:
bit_string += format(__UpperCAmelCase , "08b" ).encode("utf-8" )
lowerCamelCase_ : Optional[int] = format(len(__UpperCAmelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCAmelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __a ( __UpperCAmelCase : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(__UpperCAmelCase ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCAmelCase ) , 512 ):
lowerCamelCase_ : Union[str, Any] = bit_string[pos : pos + 512]
lowerCamelCase_ : Any = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __a ( __UpperCAmelCase : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
lowerCamelCase_ : Dict = format(__UpperCAmelCase , "032b" )
lowerCamelCase_ : Dict = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCAmelCase , 2 )
def __a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def __a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __a ( __UpperCAmelCase : bytes ) -> bytes:
"""simple docstring"""
lowerCamelCase_ : int = preprocess(__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowerCamelCase_ : List[str] = 0X67_452_301
lowerCamelCase_ : Optional[int] = 0XEF_CDA_B89
lowerCamelCase_ : str = 0X98_BAD_CFE
lowerCamelCase_ : Optional[int] = 0X10_325_476
lowerCamelCase_ : Union[str, Any] = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCAmelCase ):
lowerCamelCase_ : Optional[int] = aa
lowerCamelCase_ : List[str] = ba
lowerCamelCase_ : Optional[int] = ca
lowerCamelCase_ : List[Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowerCamelCase_ : Dict = d ^ (b & (c ^ d))
lowerCamelCase_ : Any = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowerCamelCase_ : Any = c ^ (d & (b ^ c))
lowerCamelCase_ : List[Any] = (5 * i + 1) % 16
elif i <= 47:
lowerCamelCase_ : List[Any] = b ^ c ^ d
lowerCamelCase_ : int = (3 * i + 5) % 16
else:
lowerCamelCase_ : str = c ^ (b | not_aa(__UpperCAmelCase ))
lowerCamelCase_ : int = (7 * i) % 16
lowerCamelCase_ : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
lowerCamelCase_ : Union[str, Any] = d
lowerCamelCase_ : Optional[int] = c
lowerCamelCase_ : Union[str, Any] = b
lowerCamelCase_ : List[str] = sum_aa(__UpperCAmelCase , left_rotate_aa(__UpperCAmelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
lowerCamelCase_ : Tuple = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : List[str] = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Dict = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Optional[int] = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Optional[int] = reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
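# Sanity check for a from-scratch MD5: the digest produced by the final
# function above should match Python's hashlib for the same input.
if __name__ == "__main__":
    import hashlib

    assert hashlib.md5(b"hello world").hexdigest() == "5eb63bbbe01eeed093cb22bb8f5acdc3"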
| 253
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A =datasets.logging.get_logger(__name__)
__A ='''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
__A ='''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11	Named Entities	These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
__A ='''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCamelCase__ , key_doc_lines[doc] , lowerCamelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCamelCase__ , key_doc_lines[doc] , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCamelCase__ , sys_doc_lines[doc] , lowerCamelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCamelCase__ , key_doc_lines[doc] , lowerCamelCase__ , lowerCamelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCamelCase__ , lowerCamelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCamelCase__ , lowerCamelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"Number of resulting singleton clusters in the key "
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"files, respectively" )
return doc_coref_infos
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCamelCase__ , lowerCamelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 1_0_0
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({"conll_score": conll} )
return output_scores
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=True , lowercase=False , lowercase=False , lowercase=False ) -> Union[str, Any]:
lowerCamelCase_ = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 463
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__A =''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _SCREAMING_SNAKE_CASE ( tr.AbstractTransform ):
def __init__( self , lowercase = " " ) -> List[str]:
lowerCamelCase_ = sentence_delimiter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[int]:
return list(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = []
for sent_idx, sentence in enumerate(lowercase ):
chars.extend(self.process_string(lowercase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__A =tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__A =tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__A ='''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__A ='''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__A ='''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> List[str]:
if concatenate_texts:
return jiwer.compute_measures(
lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )["wer"]
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(lowercase , lowercase ):
lowerCamelCase_ = jiwer.compute_measures(
lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
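# A dependency-free sketch of character error rate via Levenshtein distance,
# reproducing the docstring example above without jiwer (illustrative only;
# the function names are not part of this module's API).
def _char_error_rate(references, predictions):
    def levenshtein(a, b):
        # classic rolling-row edit-distance DP
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            curr = [i]
            for j, cb in enumerate(b, 1):
                curr.append(min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + (ca != cb)))
            prev = curr
        return prev[-1]

    errors = sum(levenshtein(r, p) for r, p in zip(references, predictions))
    return errors / sum(len(r) for r in references)


if __name__ == "__main__":
    refs = ["this is the reference", "there is another one"]
    preds = ["this is the prediction", "there is an other sample"]
    print(_char_error_rate(refs, preds))  # ~0.3414, matching the docstring example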
| 463
| 1
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = f'''{sampling_rate}'''
SCREAMING_SNAKE_CASE__ = """1"""
SCREAMING_SNAKE_CASE__ = """f32le"""
SCREAMING_SNAKE_CASE__ = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE__ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
SCREAMING_SNAKE_CASE__ = output_stream[0]
SCREAMING_SNAKE_CASE__ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Any = "f32le" , ):
SCREAMING_SNAKE_CASE__ = f'''{sampling_rate}'''
SCREAMING_SNAKE_CASE__ = """1"""
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE__ = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE__ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
SCREAMING_SNAKE_CASE__ = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE__ = """alsa"""
SCREAMING_SNAKE_CASE__ = """default"""
elif system == "Darwin":
SCREAMING_SNAKE_CASE__ = """avfoundation"""
SCREAMING_SNAKE_CASE__ = """:0"""
elif system == "Windows":
SCREAMING_SNAKE_CASE__ = """dshow"""
SCREAMING_SNAKE_CASE__ = """default"""
SCREAMING_SNAKE_CASE__ = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
SCREAMING_SNAKE_CASE__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE__ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: List[str] = None , UpperCamelCase__: List[Any] = None , UpperCamelCase__: Union[str, Any] = "f32le" , ):
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE__ = stream_chunk_s
else:
SCREAMING_SNAKE_CASE__ = chunk_length_s
SCREAMING_SNAKE_CASE__ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE__ = np.intaa
SCREAMING_SNAKE_CASE__ = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE__ = np.floataa
SCREAMING_SNAKE_CASE__ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
SCREAMING_SNAKE_CASE__ = chunk_length_s / 6
SCREAMING_SNAKE_CASE__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
SCREAMING_SNAKE_CASE__ = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE__ = datetime.datetime.now()
SCREAMING_SNAKE_CASE__ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE__ = np.frombuffer(item["""raw"""] , dtype=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: Dict = False ):
SCREAMING_SNAKE_CASE__ = B""""""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
SCREAMING_SNAKE_CASE__ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
SCREAMING_SNAKE_CASE__ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE__ = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE__ = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
SCREAMING_SNAKE_CASE__ = False
yield item
SCREAMING_SNAKE_CASE__ = stride_left
SCREAMING_SNAKE_CASE__ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
SCREAMING_SNAKE_CASE__ = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE__ = False
yield item
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Any ):
    SCREAMING_SNAKE_CASE__ = 2**24  # 16 MB read buffer
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE__ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
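# A minimal standalone sketch of decoding an audio file to float32 PCM with
# ffmpeg, mirroring the first helper above (assumes an `ffmpeg` binary on PATH;
# the function name is illustrative, and `subprocess`/`numpy` are already
# imported at the top of this module).
def read_audio_file(path: str, sampling_rate: int = 16000) -> np.ndarray:
    command = [
        "ffmpeg", "-i", path, "-ac", "1", "-ar", str(sampling_rate),
        "-f", "f32le", "-hide_banner", "-loglevel", "quiet", "pipe:1",
    ]
    raw = subprocess.run(command, capture_output=True, check=True).stdout
    return np.frombuffer(raw, dtype=np.float32)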
| 701
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase_ = "nat"
lowerCamelCase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :List[Any] , __A :Optional[Any]=4 , __A :Any=3 , __A :Optional[int]=64 , __A :Optional[int]=[3, 4, 6, 5] , __A :Union[str, Any]=[2, 4, 8, 16] , __A :Optional[Any]=7 , __A :Optional[Any]=3.0 , __A :List[Any]=True , __A :int=0.0 , __A :Dict=0.0 , __A :Optional[Any]=0.1 , __A :str="gelu" , __A :Optional[Any]=0.0_2 , __A :Optional[int]=1E-5 , __A :Optional[int]=0.0 , __A :Optional[Any]=None , __A :Union[str, Any]=None , **__A :Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = len(__A )
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = kernel_size
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ = int(embed_dim * 2 ** (len(__A ) - 1) )
SCREAMING_SNAKE_CASE__ = layer_scale_init_value
SCREAMING_SNAKE_CASE__ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__A ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
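# A minimal instantiation sketch (illustrative; assumes the released
# transformers NatConfig, which this class mirrors).
if __name__ == "__main__":
    from transformers import NatConfig

    config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
    print(config.hidden_size)  # 64 * 2 ** 3 = 512, the channel dim after the last stage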
| 59
| 0
|
import math
import os
import sys
def lowercase ( __A : str ) -> str:
'''simple docstring'''
snake_case : Dict = """"""
try:
with open(_lowerCamelCase , """rb""" ) as binary_file:
snake_case : Tuple = binary_file.read()
for dat in data:
snake_case : Optional[int] = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase ( __A : dict[str, str] , __A : str , __A : int , __A : str ) -> None:
'''simple docstring'''
lexicon.pop(_lowerCamelCase )
snake_case : Union[str, Any] = last_match_id
if math.loga(_lowerCamelCase ).is_integer():
for curr_key in lexicon:
snake_case : str = """0""" + lexicon[curr_key]
snake_case : List[Any] = bin(_lowerCamelCase )[2:]
def lowercase ( __A : str ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = {"""0""": """0""", """1""": """1"""}
snake_case , snake_case : Optional[Any] = """""", """"""
snake_case : Optional[int] = len(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
snake_case : Optional[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
index += 1
snake_case : Union[str, Any] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
snake_case : str = lexicon[curr_string]
result += last_match_id
return result
def lowercase ( __A : str , __A : str ) -> str:
'''simple docstring'''
snake_case : Tuple = os.path.getsize(_lowerCamelCase )
snake_case : Optional[int] = bin(_lowerCamelCase )[2:]
snake_case : Optional[int] = len(_lowerCamelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowercase ( __A : str , __A : str ) -> None:
'''simple docstring'''
snake_case : List[str] = 8
try:
with open(_lowerCamelCase , """wb""" ) as opened_file:
snake_case : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_lowerCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase ( __A : str , __A : str ) -> None:
'''simple docstring'''
snake_case : Optional[Any] = read_file_binary(_lowerCamelCase )
snake_case : List[Any] = compress_data(_lowerCamelCase )
snake_case : Dict = add_file_length(_lowerCamelCase , _lowerCamelCase )
write_file_binary(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
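# Usage, as wired up above: read the source file as a bit string, LZ-compress
# it, prepend the original-length header, and write the result, e.g.
#
#     python compress.py <source_file> <destination_file>
#
# (the script name is illustrative)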
| 36
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
| 24
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=a_ ):
__lowerCAmelCase = ["transformers", "torch", "note_seq"]
def __init__( self , *a_ , **a_ ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case_ ( cls , *a_ , **a_ ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case_ ( cls , *a_ , **a_ ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 370
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class snake_case_ ( a_ ):
__lowerCAmelCase = "mra"
def __init__( self , a_=5_0_2_6_5 , a_=7_6_8 , a_=1_2 , a_=1_2 , a_=3_0_7_2 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1 , a_=0.02 , a_=1e-5 , a_="absolute" , a_=4 , a_="full" , a_=0 , a_=0 , a_=1 , a_=0 , a_=2 , **a_ , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
a_ : Optional[int] = vocab_size
a_ : Dict = max_position_embeddings
a_ : str = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : List[str] = intermediate_size
a_ : Tuple = hidden_act
a_ : List[Any] = hidden_dropout_prob
a_ : List[str] = attention_probs_dropout_prob
a_ : List[Any] = initializer_range
a_ : Dict = type_vocab_size
a_ : Union[str, Any] = layer_norm_eps
a_ : List[str] = position_embedding_type
a_ : Union[str, Any] = block_per_row
a_ : Tuple = approx_mode
a_ : Optional[Any] = initial_prior_first_n_blocks
a_ : List[str] = initial_prior_diagonal_n_blocks
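# A minimal instantiation sketch (illustrative; assumes the released
# transformers MraConfig, which this class mirrors).
if __name__ == "__main__":
    from transformers import MraConfig

    config = MraConfig()
    print(config.hidden_size, config.block_per_row)  # 768 and 4 by default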
| 370
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
__UpperCAmelCase : str = "segformer.encoder." + key
if key.startswith("backbone" ):
__UpperCAmelCase : Union[str, Any] = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__UpperCAmelCase : str = key[key.find("patch_embed" ) + len("patch_embed" )]
__UpperCAmelCase : Any = key.replace(f"patch_embed{idx}" , f"patch_embeddings.{int(UpperCamelCase )-1}" )
if "norm" in key:
__UpperCAmelCase : List[str] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__UpperCAmelCase : int = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
__UpperCAmelCase : Optional[Any] = key.replace(f"layer_norm{idx}" , f"layer_norm.{int(UpperCamelCase )-1}" )
if "layer_norm1" in key:
__UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
__UpperCAmelCase : Optional[int] = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
__UpperCAmelCase : Union[str, Any] = key[key.find("block" ) + len("block" )]
__UpperCAmelCase : List[str] = key.replace(f"block{idx}" , f"block.{int(UpperCamelCase )-1}" )
if "attn.q" in key:
__UpperCAmelCase : List[str] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
__UpperCAmelCase : List[Any] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
__UpperCAmelCase : Union[str, Any] = key.replace("attn" , "attention.self" )
if "fc1" in key:
__UpperCAmelCase : int = key.replace("fc1" , "dense1" )
if "fc2" in key:
__UpperCAmelCase : str = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
__UpperCAmelCase : Optional[int] = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
__UpperCAmelCase : Tuple = key.replace("linear_fuse.conv" , "linear_fuse" )
__UpperCAmelCase : Tuple = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__UpperCAmelCase : Tuple = key[key.find("linear_c" ) + len("linear_c" )]
__UpperCAmelCase : Any = key.replace(f"linear_c{idx}" , f"linear_c.{int(UpperCamelCase )-1}" )
if key.startswith("head" ):
__UpperCAmelCase : Optional[int] = key.replace("head" , "classifier" )
__UpperCAmelCase : List[Any] = value
return new_state_dict
def read_in_k_v( config , state_dict ) -> None:
    """simple docstring"""
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
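# The slicing in read_in_k_v splits one fused (2*hidden, hidden) key/value matrix
# into separate key and value projections. A standalone shape sketch (sizes are
# illustrative, not taken from a real checkpoint):
#
#     import numpy as np
#     hidden = 64
#     kv_weight = np.random.randn(2 * hidden, hidden)
#     key_weight, value_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
#     assert key_weight.shape == value_weight.shape == (hidden, hidden)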
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer." ) : len("segformer." ) + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported" )
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported" )
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
__UpperCAmelCase : Optional[int] = [64, 128, 320, 512]
__UpperCAmelCase : Tuple = 256
elif size == "b2":
__UpperCAmelCase : int = [64, 128, 320, 512]
__UpperCAmelCase : List[str] = 768
__UpperCAmelCase : Optional[int] = [3, 4, 6, 3]
elif size == "b3":
__UpperCAmelCase : Tuple = [64, 128, 320, 512]
__UpperCAmelCase : List[Any] = 768
__UpperCAmelCase : List[str] = [3, 4, 18, 3]
elif size == "b4":
__UpperCAmelCase : Any = [64, 128, 320, 512]
__UpperCAmelCase : Dict = 768
__UpperCAmelCase : List[str] = [3, 8, 27, 3]
elif size == "b5":
__UpperCAmelCase : str = [64, 128, 320, 512]
__UpperCAmelCase : List[Any] = 768
__UpperCAmelCase : List[str] = [3, 6, 40, 3]
else:
raise ValueError(f"Size {size} not supported" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info(f"Converting model {model_name}..." )
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(config , state_dict )
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config )
    else:
        model = SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
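# Example invocation (the script filename and checkpoint path are illustrative):
#
#     python convert_segformer_original_to_pytorch.py \
#         --model_name segformer.b0.512x512.ade.160k \
#         --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#         --pytorch_dump_folder_path ./segformer-b0-ade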
| 77
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
A_ : List[Any] =logging.get_logger(__name__)
class lowercase_ ( FeatureExtractionMixin ):
    """simple docstring"""
    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        """simple docstring"""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("""padding_side""" , """right""" )
        self.return_attention_mask = kwargs.pop("""return_attention_mask""" , True )
        super().__init__(**kwargs )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
"""simple docstring"""
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
a_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
a_ = processed_features[self.model_input_names[0]]
a_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCAmelCase ) == 0:
if return_attention_mask:
a_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
a_ = required_input[0]
if isinstance(_UpperCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
a_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCAmelCase ):
a_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCAmelCase ):
a_ = """tf"""
elif is_torch_tensor(_UpperCAmelCase ):
a_ = """pt"""
elif isinstance(_UpperCAmelCase , (int, float, list, tuple, np.ndarray) ):
a_ = """np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(_UpperCAmelCase )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
a_ = to_numpy(_UpperCAmelCase )
else:
a_ = [to_numpy(_UpperCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
a_ = self._get_padding_strategies(padding=_UpperCAmelCase , max_length=_UpperCAmelCase )
a_ = processed_features[self.model_input_names[0]]
a_ = len(_UpperCAmelCase )
if not all(len(_UpperCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
a_ = []
for i in range(_UpperCAmelCase ):
a_ = {k: v[i] for k, v in processed_features.items()}
# truncation
a_ = self._truncate(
_UpperCAmelCase , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
truncated_inputs.append(_UpperCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
a_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
a_ = PaddingStrategy.MAX_LENGTH
a_ = {}
for i in range(_UpperCAmelCase ):
# padding
a_ = self._pad(
truncated_inputs[i] , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
a_ = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
return BatchFeature(_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
"""simple docstring"""
a_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
a_ = len(_UpperCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
if needs_to_be_padded:
a_ = max_length - len(_UpperCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
a_ = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
a_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
a_ = np.pad(
_UpperCAmelCase , _UpperCAmelCase , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
a_ = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
a_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
a_ = np.pad(
_UpperCAmelCase , _UpperCAmelCase , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
a_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a_ = len(_UpperCAmelCase ) > max_length
if needs_to_be_truncated:
a_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
a_ = processed_features["""attention_mask"""][:max_length]
return processed_features
def lowercase__ ( self , _UpperCAmelCase=False , _UpperCAmelCase=None ):
"""simple docstring"""
if padding is not False:
if padding is True:
a_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = PaddingStrategy(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = padding
else:
a_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
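# Worked example of the `pad_to_multiple_of` rounding used by the padding and
# truncation helpers above: a requested max_length that is not already a
# multiple is rounded up to the next one. Standalone sketch:
#
#     max_length, pad_to_multiple_of = 100, 8
#     if max_length % pad_to_multiple_of != 0:
#         max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
#     assert max_length == 104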
| 483
| 0
|
"""simple docstring"""
class OverFlowError( Exception ):
    '''simple docstring'''
    pass
class UnderFlowError( Exception ):
    '''simple docstring'''
    pass
class FixedPriorityQueue:
    '''simple docstring'''
    def __init__( self ) -> None:
        '''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority: int , data: int ) -> None:
        '''simple docstring'''
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverflowError('''Maximum queue size is 100''' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('''Valid priorities are 0, 1, and 2''' )
    def dequeue( self ) -> int:
        '''simple docstring'''
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('''All queues are empty''' )
def __str__( self :str ) -> str:
'''simple docstring'''
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    '''simple docstring'''
    def __init__( self ) -> None:
        '''simple docstring'''
        self.queue = []
    def enqueue( self , data: int ) -> None:
        '''simple docstring'''
        if len(self.queue ) == 100:
            raise OverFlowError('''Maximum queue size is 100''' )
        self.queue.append(data )
    def dequeue( self ) -> int:
        '''simple docstring'''
        if not self.queue:
            raise UnderFlowError('''The queue is empty''' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__( self ) -> str:
        '''simple docstring'''
        return str(self.queue )
def fixed_priority_queue() -> None:
    """simple docstring"""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    print(fpq )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def element_priority_queue() -> None:
    """simple docstring"""
    epq = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(epq )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
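# The ElementPriorityQueue above dequeues via a linear min-scan (O(n)); the
# standard-library heapq module gives the same element-priority behaviour in
# O(log n) per operation. A minimal equivalent sketch:
#
#     import heapq
#     heap = []
#     for item in (10, 70, 100, 1, 5, 7, 4, 64, 128):
#         heapq.heappush(heap, item)
#     assert heapq.heappop(heap) == 1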
| 158
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        '''simple docstring'''
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
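# The JIT test above checks that jax.jit-compiled and eagerly executed calls
# agree. The same pattern on a toy function (standalone sketch, not the model):
#
#     import jax
#     import jax.numpy as jnp
#     f = lambda x: jnp.tanh(x) * 2.0
#     x = jnp.ones((2, 3))
#     assert jnp.allclose(jax.jit(f)(x), f(x))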
| 158
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt( self ):
        text_classifier = pipeline(
            task='text-classification' ,model='hf-internal-testing/tiny-random-distilbert' ,framework='pt' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'LABEL_0', 'score': 0.504}] )
        outputs = text_classifier('This is great !' ,top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) ,[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
        outputs = text_classifier(['This is great !', 'This is bad'] ,top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) ,[
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ] ,)
        outputs = text_classifier('This is great !' ,top_k=1 )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'LABEL_0', 'score': 0.504}] )
        # Legacy behavior
        outputs = text_classifier('This is great !' ,return_all_scores=False )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'LABEL_0', 'score': 0.504}] )
        outputs = text_classifier('This is great !' ,return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) ,[[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
        outputs = text_classifier(['This is great !', 'Something else'] ,return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) ,[
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ] ,)
        outputs = text_classifier(['This is great !', 'Something else'] ,return_all_scores=False )
        self.assertEqual(
            nested_simplify(outputs ) ,[
                {'label': 'LABEL_0', 'score': 0.504},
                {'label': 'LABEL_0', 'score': 0.504},
            ] ,)
    @require_torch
    def test_accepts_torch_device( self ):
        import torch
        text_classifier = pipeline(
            task='text-classification' ,model='hf-internal-testing/tiny-random-distilbert' ,framework='pt' ,device=torch.device('cpu' ) ,)
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'LABEL_0', 'score': 0.504}] )
    @require_tf
    def test_small_model_tf( self ):
        text_classifier = pipeline(
            task='text-classification' ,model='hf-internal-testing/tiny-random-distilbert' ,framework='tf' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'LABEL_0', 'score': 0.504}] )
    @slow
    @require_torch
    def test_pt_bert( self ):
        text_classifier = pipeline('text-classification' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'POSITIVE', 'score': 1.0}] )
        outputs = text_classifier('This is bad !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'NEGATIVE', 'score': 1.0}] )
        outputs = text_classifier('Birds are a type of animal' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'POSITIVE', 'score': 0.988}] )
    @slow
    @require_tf
    def test_tf_bert( self ):
        text_classifier = pipeline('text-classification' ,framework='tf' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'POSITIVE', 'score': 1.0}] )
        outputs = text_classifier('This is bad !' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'NEGATIVE', 'score': 1.0}] )
        outputs = text_classifier('Birds are a type of animal' )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': 'POSITIVE', 'score': 0.988}] )
    def get_test_pipeline( self ,model ,tokenizer ,processor ):
        text_classifier = TextClassificationPipeline(model=model ,tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test( self ,text_classifier ,_ ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = 'HuggingFace is in'
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) ,[{'label': ANY(str ), 'score': ANY(float )}] )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values() )
        valid_inputs = ['HuggingFace is in ', 'Paris is in France']
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) ,[{'label': ANY(str ), 'score': ANY(float )}, {'label': ANY(str ), 'score': ANY(float )}] ,)
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values() )
        self.assertTrue(outputs[1]['label'] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs ,top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) ,[[{'label': ANY(str ), 'score': ANY(float )}] * N, [{'label': ANY(str ), 'score': ANY(float )}] * N] ,)
        valid_inputs = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) ,{'label': ANY(str ), 'score': ANY(float )} ,)
        self.assertTrue(outputs['label'] in model.config.id2label.values() )
        # This might be used as a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
        self.assertEqual(
            nested_simplify(outputs ) ,[{'label': ANY(str ), 'score': ANY(float )}] ,)
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values() )
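# Sketch of the `top_k` semantics exercised above (scores are illustrative for
# a two-label model): top_k=1 yields one {label, score} dict per input, while
# top_k=None yields one list containing an entry for every label.
#
#     classifier = pipeline('text-classification', model='hf-internal-testing/tiny-random-distilbert')
#     classifier('This is great !', top_k=None)
#     # -> [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]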
| 105
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class snake_case :
    def __init__(self ):
        """simple docstring"""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self ):
        """simple docstring"""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
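# The matrix-based result above can be cross-checked in the frequency domain,
# since circular convolution is element-wise multiplication of DFTs. For the
# built-in signals this gives [10.0, 10.0, 6.0, 14.0]:
#
#     import numpy as np
#     a, b = [2, 1, 2, -1], [1, 2, 3, 4]
#     via_fft = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
#     assert [round(x, 2) for x in via_fft] == [10.0, 10.0, 6.0, 14.0]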
| 626
| 0
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCamelCase__ : str = "__DUMMY_TRANSFORMERS_USER__"
UpperCamelCase__ : int = "Dummy User"
UpperCamelCase__ : Optional[int] = "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"
UpperCamelCase__ : Dict = "https://hub-ci.huggingface.co"
UpperCamelCase__ : str = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
UpperCamelCase__ : Optional[Any] = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
UpperCamelCase__ : List[Any] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url( monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr(
        'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config( monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_ENDPOINT' , CI_HUB_ENDPOINT )
    monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path( monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token( ci_hub_config , ci_hub_token_path ):
    """simple docstring"""
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='session' )
def hf_api():
    """simple docstring"""
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='session' )
def hf_token(hf_api: HfApi ):
    """simple docstring"""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api ):
    """simple docstring"""
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type='dataset' )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo ):
    """simple docstring"""
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_txt_data_(hf_api: HfApi , hf_token , text_file ):
    """simple docstring"""
    repo_name = f"""repo_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo='data/text_data.txt' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """simple docstring"""
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi , hf_token , zip_csv_with_dir_path ):
    """simple docstring"""
    repo_name = f"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """simple docstring"""
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi , hf_token , zip_image_path ):
    """simple docstring"""
    repo_name = f"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """simple docstring"""
    return hf_private_dataset_repo_zipped_img_data_
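# Sketch of how the `temporary_repo` fixture above is meant to be consumed in a
# test (fixture and helper names as restored above; the repo id is illustrative):
#
#     def test_upload(temporary_repo, hf_api, hf_token):
#         with temporary_repo(f"{CI_HUB_USER}/tmp-repo") as repo_id:
#             hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")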
| 620
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
SCREAMING_SNAKE_CASE_ = []
# custom device map
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
# compatibility with peft
SCREAMING_SNAKE_CASE_ = load_in_abit
SCREAMING_SNAKE_CASE_ = load_in_abit
SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
# convert param to the right dtype
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' )
SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
param.to(_SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
SCREAMING_SNAKE_CASE_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = special_dtypes
SCREAMING_SNAKE_CASE_ = no_split_module_classes
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE_ = get_balanced_memory(
_SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ = max_memory
SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE_ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
SCREAMING_SNAKE_CASE_ = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE_ = module.bias.data
bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with init_empty_weights():
SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] )
SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE_ = False
if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE_ = list(model.named_children() )
SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
SCREAMING_SNAKE_CASE_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit(model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics):
    # if the weight is not yet quantized, quantize it and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , 'meta' , dtype=new_dtype , value=torch.empty(*param.size() ) )
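# Usage sketch (illustrative; the toy module below is not part of this file):
# get_parameter_device reports the device of a module's first parameter, so a
# freshly built CPU module yields device(type='cpu').
def _demo_parameter_device():
    toy = nn.Linear(2, 2)  # hypothetical throwaway module
    return get_parameter_device(toy)  # -> device(type='cpu')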
| 620
| 1
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
    def __call__( self ,hidden_states ,temb ,encoder_hidden_states ,deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets ,self.attentions ):
            hidden_states = resnet(hidden_states ,temb ,deterministic=deterministic )
            hidden_states = attn(hidden_states ,encoder_hidden_states ,deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
    def __call__( self ,hidden_states ,temb ,deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states ,temb ,deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
    def __call__( self ,hidden_states ,res_hidden_states_tuple ,temb ,encoder_hidden_states ,deterministic=True ):
        for resnet, attn in zip(self.resnets ,self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
            hidden_states = resnet(hidden_states ,temb ,deterministic=deterministic )
            hidden_states = attn(hidden_states ,encoder_hidden_states ,deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUpBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
    def __call__( self ,hidden_states ,res_hidden_states_tuple ,temb ,deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
            hidden_states = resnet(hidden_states ,temb ,deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self ,hidden_states ,temb ,encoder_hidden_states ,deterministic=True ):
        hidden_states = self.resnets[0](hidden_states ,temb )
        for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
            hidden_states = attn(hidden_states ,encoder_hidden_states ,deterministic=deterministic )
            hidden_states = resnet(hidden_states ,temb ,deterministic=deterministic )
        return hidden_states
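# A minimal sketch of how the down and up blocks mirror each other: each down
# block appends its hidden states to a residual tuple, and the matching up
# block pops from the end of that tuple and concatenates along the channel
# axis. Purely illustrative; `_demo_skip_connections` is not part of diffusers.
def _demo_skip_connections():
    down_outputs = ()
    hidden = jnp.ones((1, 8, 8, 4))
    for _ in range(3):  # pretend three resnet/attention stages on the down path
        down_outputs += (hidden,)
    for _ in range(3):  # the up path consumes the tuple in reverse
        res = down_outputs[-1]
        down_outputs = down_outputs[:-1]
        hidden = jnp.concatenate((hidden, res), axis=-1)
    return hidden.shape  # channel count grows with each concatenation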
| 105
|
import math
from datetime import datetime, timedelta
def gauss_easter ( year : int ) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
    tense = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
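# Spot checks against published computus dates (a sketch asserting only
# well-known values): gauss_easter(1994) falls on 1994-04-03, gauss_easter(2000)
# on 2000-04-23, and gauss_easter(2023) on 2023-04-09.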
| 47
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="" ) -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase ):
    def test_from_tensor( self ) -> None:
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
    def test_from_string( self ) -> None:
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16_000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase ):
    def test_from_tensor( self ) -> None:
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_string( self ) -> None:
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_image( self ) -> None:
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class AgentTextTests(unittest.TestCase ):
    def test_from_string( self ) -> None:
        string = "Hey!"
        agent_type = AgentText(string )
        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        self.assertEqual(string , agent_type )
| 721
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" ,config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape ,their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output ,their_output ,atol=1E-3 )
    print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True ,exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
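# Example invocation (paths are placeholders, the script filename is assumed):
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/fairseq/checkpoint_dir \
#     --pytorch_dump_folder_path /path/to/output [--classification_head]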
| 656
| 0
|
from math import log2
def _lowercase ( a : int ) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (a == 0) else int(log2(a & -a ) )
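# Worked example (illustrative): 36 is 0b100100, so 36 & -36 isolates the
# lowest set bit, 0b100 == 4, and log2(4) == 2 — the index of the rightmost
# set bit returned above.
assert 36 & -36 == 4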
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344
|
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase ):
    def setUp( self ) -> None:
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt( self , model_path ) -> None:
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ) -> None:
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ) -> None:
        mock_framework = '''mock_framework'''
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ) -> None:
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ) -> None:
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 262
| 0
|
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(screen_name: str ) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
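# The manual max_id pagination above can also be expressed with tweepy's
# Cursor helper; a hedged sketch (commented out, same credentials assumed):
# for status in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items():
#     process(status)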
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 535
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1E10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
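# Quick illustration of the incremental sieve (a hypothetical helper, not used
# by solution()): the first five yielded values are the first five primes.
def _demo_sieve_head(count: int = 5) -> list:
    gen = sieve()
    return [next(gen) for _ in range(count)]  # [2, 3, 5, 7, 11]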
if __name__ == "__main__":
print(solution())
| 535
| 1
|
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 325
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__( self , data ) -> None:
        self.data = data
        self.next = None
    def __repr__( self ) -> str:
        return F'''Node({self.data})'''
class LinkedList:
    def __init__( self ) -> None:
        self.head = None
    def __iter__( self ) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __repr__( self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ) -> Any:
        if not 0 <= index < len(self ):
            raise ValueError("""list index out of range.""" )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError("""list index out of range.""" )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ) -> None:
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ) -> None:
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError("""list index out of range""" )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ) -> None:  # print every node data
        print(self )
    def delete_head( self ) -> Any:
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ) -> Any:
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError("""List index out of range.""" )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ) -> bool:
        return self.head is None
    def reverse( self ) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(77345112 ),
        """dlrow olleH""",
        7,
        5555,
        0,
        -192.55555,
        """Hello, world!""",
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("""Hello again, world!""" ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
    linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
    print("""\nPrint list:""" )
    linked_list.print_list()
    linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
    linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
    print("""\nPrint list:""" )
    linked_list.print_list()
    print("""\nDelete head""" )
    linked_list.delete_head()
    print("""Delete tail""" )
    linked_list.delete_tail()
    print("""\nPrint list:""" )
    linked_list.print_list()
    print("""\nReverse linked list""" )
    linked_list.reverse()
    print("""\nPrint list:""" )
    linked_list.print_list()
    print("""\nString representation of linked list:""" )
    print(linked_list )
    print("""\nReading/changing Node data using indexing:""" )
    print(f'''Element at Position 1: {linked_list[1]}''' )
    linked_list[1] = input("""Enter New Value: """ ).strip()
    print("""New list:""" )
    print(linked_list )
    print(f'''length of linked_list is : {len(linked_list )}''' )
if __name__ == "__main__":
    main()
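# Hedged convenience sketch (not part of the class API): build a LinkedList
# from any iterable and check that two reversals round-trip the representation.
def _demo_round_trip(values ):
    ll = LinkedList()
    for v in values:
        ll.insert_tail(v )
    before = str(ll )
    ll.reverse()
    ll.reverse()
    return before == str(ll )  # always True for a correct reverse()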
| 325
| 1
|
from math import factorial, pi
def maclaurin_sin(theta: float , accuracy: int = 30 ) -> float:
    if not isinstance(theta , (int, float) ):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos(theta: float , accuracy: int = 30 ) -> float:
    if not isinstance(theta , (int, float) ):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
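# Cross-check sketch against the standard library (illustrative only; the
# helper name is hypothetical): both truncated series should agree with
# math.sin/math.cos to roughly 1e-7 for moderate angles.
def _demo_accuracy(theta: float = 1.0 ) -> tuple:
    from math import cos, sin
    return abs(maclaurin_sin(theta ) - sin(theta ) ), abs(maclaurin_cos(theta ) - cos(theta ) )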
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 701
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train , X_test , y_train , y_test = train_test_split(X, y)
def euclidean_distance(a , b ) -> float:
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
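# The distance loop above can be vectorized with NumPy broadcasting; a hedged
# equivalent sketch (hypothetical helper, same k-nearest majority vote):
def _classifier_vectorized(train_data , train_target , classes , point , k=5 ):
    dists = np.linalg.norm(np.asarray(train_data ) - np.asarray(point ) , axis=1 )
    nearest = np.argsort(dists )[:k]  # indices of the k closest points
    votes = [train_target[i] for i in nearest]
    return classes[Counter(votes ).most_common(1 )[0][0]]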
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 546
| 0
|
def is_palindrome(head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
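# Minimal usage sketch. This module assumes node objects with `val`/`next`
# attributes but does not define one, so the tiny class below is illustrative.
class _DemoNode:
    def __init__(self , val ):
        self.val = val
        self.next = None
def _build(values ):
    head = None
    for v in reversed(values ):
        node = _DemoNode(v )
        node.next = head
        head = node
    return head
# is_palindrome(_build([1, 2, 2, 1])) -> True; is_palindrome_dict(_build([1, 2, 3])) -> False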
| 0
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
| 0
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig(PretrainedConfig ):
    model_type = """open-llama"""
    def __init__( self , vocab_size=100000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
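# Illustrative use of the validator (a sketch):
# OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0}) validates,
# while rope_scaling={"type": "linear"} raises because `factor` must be a
# float greater than 1.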
| 708
|
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowercase__ = datasets.logging.get_logger(__name__)
lowercase__ = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
lowercase__ = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
lowercase__ = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'sources': datasets.Value('string' , id='sequence' ),
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] , )
    def _download_and_prepare( self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 492
| 0
|
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """openbmb/cpm-ant-10b""": 1024,
}
def _lowerCAmelCase ( lowercase ) -> List[str]:
__lowerCAmelCase = collections.OrderedDict()
with open(lowercase , """r""" , encoding="""utf-8""" ) as reader:
__lowerCAmelCase = reader.readlines()
for index, token in enumerate(lowercase ):
__lowerCAmelCase = token.rstrip("""\n""" )
__lowerCAmelCase = index
return vocab
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE=2_00 ):
'''simple docstring'''
__lowerCAmelCase = vocab
__lowerCAmelCase = unk_token
__lowerCAmelCase = max_input_chars_per_word
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = list(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > self.max_input_chars_per_word:
return [self.unk_token]
__lowerCAmelCase = 0
__lowerCAmelCase = []
while start < len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = None
while start < end:
__lowerCAmelCase = """""".join(chars[start:end] )
if substr in self.vocab:
__lowerCAmelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = end
return sub_tokens
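# --- illustration (added sketch): the greedy longest-match-first loop above ---
# A self-contained reproduction with a toy vocabulary (the entries are hypothetical);
# spans with no vocab match collapse to the unk token one character at a time.
def _demo_wordpiece_longest_match():
    vocab, unk = {"un", "happi", "ness"}, "<unk>"
    token, start, pieces = "unhappiness", 0, []
    while start < len(token):
        end = len(token)
        cur = None
        while start < end:
            if token[start:end] in vocab:
                cur = token[start:end]
                break
            end -= 1
        if cur is None:
            pieces.append(unk)
            start += 1
        else:
            pieces.append(cur)
            start = end
    assert pieces == ["un", "happi", "ness"]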
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Dict =VOCAB_FILES_NAMES
a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] =["""input_ids""", """attention_mask"""]
a : Union[str, Any] =False
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<d>",__SCREAMING_SNAKE_CASE="</d>",__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="</n>",__SCREAMING_SNAKE_CASE="</_>",__SCREAMING_SNAKE_CASE="left",**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
requires_backends(self,["""jieba"""] )
super().__init__(
bod_token=__SCREAMING_SNAKE_CASE,eod_token=__SCREAMING_SNAKE_CASE,bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,line_token=__SCREAMING_SNAKE_CASE,space_token=__SCREAMING_SNAKE_CASE,padding_side=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = bod_token
__lowerCAmelCase = eod_token
__lowerCAmelCase = load_vocab(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.encoder[space_token]
__lowerCAmelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items(),key=lambda __SCREAMING_SNAKE_CASE : x[1] ) )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = WordpieceTokenizer(vocab=self.encoder,unk_token=self.unk_token )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
for x in jieba.cut(__SCREAMING_SNAKE_CASE,cut_all=__SCREAMING_SNAKE_CASE ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) )
return output_tokens
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = [i for i in token_ids if i >= 0]
__lowerCAmelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return token in self.encoder
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return "".join(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.encoder.get(__SCREAMING_SNAKE_CASE,self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.decoder.get(__SCREAMING_SNAKE_CASE,self.unk_token )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__lowerCAmelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
if " " in self.encoder:
__lowerCAmelCase = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
__lowerCAmelCase = self.encoder["""\n"""]
del self.encoder["\n"]
__lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items(),key=lambda __SCREAMING_SNAKE_CASE : x[1] ) )
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
                    index = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE ))
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE ))
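# --- illustration (added note): the special-token layout produced by the two methods above ---
# With hypothetical ids (bos_id = 6; A = [10, 11]; B = [20]):
#   build_inputs_with_special_tokens(A)    -> [6, 10, 11]
#   build_inputs_with_special_tokens(A, B) -> [6, 10, 11, 6, 20]
#   get_special_tokens_mask(A, B)          -> [1, 0, 0, 1, 0]
# i.e. every segment is prefixed with bos, and the mask flags those bos positions.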
| 689
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ) -> Optional[int]:
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
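# --- illustration (added sketch): the rank-decomposed update applied in `convert` above ---
# Merging adds alpha * (up @ down) onto the frozen weight, i.e. the
# W = W0 + alpha * deltaW formula from the CLI help below; shapes here are arbitrary.
def _demo_lora_merge():
    weight = torch.zeros(4 , 4 )
    up, down = torch.randn(4 , 2 ), torch.randn(2 , 4 )  # rank-2 factors
    weight += 0.75 * torch.mm(up , down )
    assert weight.shape == (4, 4)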
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689
| 1
|
"""simple docstring"""
import numpy as np
def A_ ( __lowercase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
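# --- illustration (added note): the expression above is algebraically identical to np.tanh ---
# A quick numerical check one could run, since tanh(x) = (2 / (1 + e^(-2x))) - 1:
#   x = np.linspace(-3, 3, 7)
#   np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))  # -> True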
| 395
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def A_ ( __lowercase , __lowercase , __lowercase , __lowercase=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
UpperCamelCase_ : Dict =os.path.abspath(__lowercase )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
UpperCamelCase_ : List[Any] =torch.load(__lowercase , map_location='cpu' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
UpperCamelCase_ : str =convert_pytorch_state_dict_to_flax(__lowercase , __lowercase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase_ : str =convert_pytorch_sharded_state_dict_to_flax(__lowercase , __lowercase )
return flax_state_dict
def A_ ( __lowercase , __lowercase , __lowercase , __lowercase , ):
def is_key_or_prefix_key_in_dict(__lowercase ) -> bool:
return len(set(__lowercase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase_ : Any =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase_ : Union[str, Any] =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase_ : Optional[int] =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase_ : Union[str, Any] =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase_ : Tuple =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowercase ):
UpperCamelCase_ : List[Any] =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase_ : Optional[Any] =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowercase ):
UpperCamelCase_ : Union[str, Any] =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase_ : Any =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase_ : Optional[int] =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase_ : Dict =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase_ : int =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase_ : List[str] =pt_tuple_key[-2] + '_v'
if name is not None:
UpperCamelCase_ : Optional[int] =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
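# --- illustration (added sketch): the conv-kernel transpose performed above ---
# PyTorch stores conv weights as (out, in, h, w) while Flax expects (h, w, in, out),
# hence transpose(2, 3, 1, 0); a quick shape check:
def _demo_conv_kernel_layout():
    pt_kernel = np.zeros((8, 3, 5, 5) )  # (out_channels, in_channels, h, w)
    assert pt_kernel.transpose(2 , 3 , 1 , 0 ).shape == (5, 5, 3, 8)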
def A_ ( __lowercase , __lowercase ):
# convert pytorch tensor to numpy
UpperCamelCase_ : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase_ : Any =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase_ : Dict =flax_model.params['params']
else:
UpperCamelCase_ : Union[str, Any] =flax_model.params
UpperCamelCase_ : Optional[Any] =flatten_dict(__lowercase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase_ : Any =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(__lowercase )
UpperCamelCase_ : Optional[Any] ={}
UpperCamelCase_ : str =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase_ : Optional[Any] =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase_ : str =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
UpperCamelCase_ : Dict =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase_ : str =pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase_ , UpperCamelCase_ : Tuple =rename_key_and_reshape_tensor(
__lowercase , __lowercase , __lowercase , __lowercase )
# add model prefix if necessary
UpperCamelCase_ : str =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase_ : List[Any] =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase_ : str =jnp.asarray(__lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowercase , __lowercase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : str =jnp.asarray(__lowercase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : str =jnp.asarray(__lowercase )
return unflatten_dict(__lowercase )
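# --- illustration (added sketch): the flatten/unflatten round-trip used above ---
# flax's flatten_dict maps a nested params tree to tuple keys, which is the key
# space the renaming logic walks; unflatten_dict inverts it exactly.
def _demo_flatten_roundtrip():
    tree = {"encoder": {"layer_0": {"kernel": 1}}}
    assert flatten_dict(tree ) == {("encoder", "layer_0", "kernel"): 1}
    assert unflatten_dict(flatten_dict(tree ) ) == tree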
def A_ ( __lowercase , __lowercase ):
import torch
# Load the index
UpperCamelCase_ : List[str] ={}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase_ : Dict =torch.load(__lowercase )
UpperCamelCase_ : Tuple ={k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase_ : Optional[int] =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase_ : Union[str, Any] =flax_model.params['params']
UpperCamelCase_ : str =flatten_dict(__lowercase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
UpperCamelCase_ : Optional[Any] =flax_model.params
UpperCamelCase_ : Union[str, Any] =flatten_dict(__lowercase )
UpperCamelCase_ : List[str] =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase_ : Any =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase_ : str =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
UpperCamelCase_ : Optional[Any] =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase_ : Optional[int] =pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase_ , UpperCamelCase_ : Tuple =rename_key_and_reshape_tensor(
__lowercase , __lowercase , __lowercase , __lowercase )
# add model prefix if necessary
UpperCamelCase_ : Tuple =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase_ : int =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase_ : Union[str, Any] =jnp.asarray(__lowercase )
continue
if "var" in flax_key[-1]:
UpperCamelCase_ : Optional[int] =jnp.asarray(__lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowercase , __lowercase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : Dict =jnp.asarray(__lowercase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : int =jnp.asarray(__lowercase )
return unflatten_dict(__lowercase )
def A_ ( __lowercase , __lowercase ):
UpperCamelCase_ : str =os.path.abspath(__lowercase )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
UpperCamelCase_ : Any =getattr(__lowercase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(__lowercase , 'rb' ) as state_f:
try:
UpperCamelCase_ : Any =from_bytes(__lowercase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__lowercase , __lowercase )
def A_ ( __lowercase , __lowercase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    UpperCamelCase_ : List[Any] =flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , __lowercase ) ).values()
if any(__lowercase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        UpperCamelCase_ : List[Any] =jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , __lowercase )
UpperCamelCase_ : str =flatten_dict(__lowercase )
UpperCamelCase_ : Union[str, Any] =pt_model.state_dict()
UpperCamelCase_ : int =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase_ : Optional[int] =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase_ : Tuple =[]
UpperCamelCase_ : Tuple =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase_ : str =flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase_ : Tuple ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase_ : Union[str, Any] =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase_ : int =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowercase ) not in pt_model_dict:
# conv layer
UpperCamelCase_ : Union[str, Any] =flax_key_tuple[:-1] + ('weight',)
UpperCamelCase_ : Optional[int] =jnp.transpose(__lowercase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowercase ) not in pt_model_dict:
# linear layer
UpperCamelCase_ : str =flax_key_tuple[:-1] + ('weight',)
UpperCamelCase_ : List[Any] =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase_ : Optional[Any] =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase_ : int =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase_ : int =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
UpperCamelCase_ : Optional[Any] ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase_ : Any ='.'.join(__lowercase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase_ : Optional[Any] ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase_ : List[Any] =key.split('.' )
UpperCamelCase_ : Optional[Any] =None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase_ : Optional[int] =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase_ : Optional[int] =key_components[-2] + '_v'
if name is not None:
UpperCamelCase_ : str =key_components[:-3] + [name]
UpperCamelCase_ : int ='.'.join(__lowercase )
UpperCamelCase_ : int =key
if flax_key in special_pt_names:
UpperCamelCase_ : str =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase_ : Any =np.asarray(__lowercase ) if not isinstance(__lowercase , np.ndarray ) else flax_tensor
UpperCamelCase_ : Any =torch.from_numpy(__lowercase )
# remove from missing keys
missing_keys.remove(__lowercase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowercase )
pt_model.load_state_dict(__lowercase )
# re-transform missing_keys to list
UpperCamelCase_ : Tuple =list(__lowercase )
if len(__lowercase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__lowercase ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 395
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = ['input_features', 'is_longer']
def __init__( self : Optional[int] , a : Dict=64 , a : Optional[int]=48_000 , a : Optional[Any]=480 , a : int=10 , a : Optional[Any]=1_024 , a : Union[str, Any]=0.0 , a : str=False , a : float = 0 , a : float = 14_000 , a : int = None , a : str = "fusion" , a : str = "repeatpad" , **a : Dict , )-> int:
"""simple docstring"""
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , return_attention_mask=a , **a , )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a , min_frequency=a , max_frequency=a , sampling_rate=a , norm=a , mel_scale='htk' , )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a , min_frequency=a , max_frequency=a , sampling_rate=a , norm='slaney' , mel_scale='slaney' , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict[str, Any]:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE_ ( self : str , a : np.array , a : Optional[np.array] = None )-> np.ndarray:
"""simple docstring"""
lowercase__ = spectrogram(
a , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=a , log_mel='dB' , )
return log_mel_spectrogram.T
def SCREAMING_SNAKE_CASE_ ( self : Any , a : Union[str, Any] , a : str , a : List[str] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
a , size=[chunk_frames, 64] , mode='bilinear' , align_corners=a )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
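    # --- illustration (added note): how the three candidate pools above are built ---
    # With, say, 10 total frames and 4-frame chunks there are 7 valid start indices;
    #   np.array_split(list(range(0, 10 - 4 + 1)), 3)
    #     -> [array([0, 1, 2]), array([3, 4]), array([5, 6])]
    # and one start index is drawn at random from each pool (front / middle / back).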
def SCREAMING_SNAKE_CASE_ ( self : Any , a : np.array , a : Dict , a : List[str] , a : str )-> np.array:
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(a ) - max_length
lowercase__ = np.random.randint(0 , overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(a , self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel] , axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(a , a , a )
lowercase__ = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(a ) )
lowercase__ = np.stack(np.tile(a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(a ) )
lowercase__ = np.stack(np.tile(a , a ) )
lowercase__ = np.pad(a , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(a , self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[int] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : str = None , a : Optional[str] = None , a : Optional[int] = None , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , **a : List[str] , )-> BatchFeature:
"""simple docstring"""
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            lowercase__ = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            lowercase__ = np.asarray(a , dtype=np.float64 )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            lowercase__ = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(a )]
# convert to mel spectrogram, truncate and pad if needed.
        lowercase__ = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        lowercase__ = []
        lowercase__ = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
if isinstance(input_mel[0] , a ):
            lowercase__ = [np.asarray(feature , dtype=np.float32 ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'input_features': input_mel, 'is_longer': is_longer}
lowercase__ = BatchFeature(a )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(a )
return input_features
| 235
|
import random
def partition(a , left_index , right_index ) -> Optional[Any]:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a , left , right ) -> Union[str, Any]:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main() -> Optional[Any]:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    a = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(a , 0 , len(a ) )
    print(a )
if __name__ == "__main__":
main()
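# --- illustration (added note): a deterministic run of the quicksort above ---
#   random.seed(0)  # seed chosen arbitrarily, only to pin the pivot draws
#   a = [3, 1, 4, 1, 5, 9, 2, 6]
#   quick_sort_random(a, 0, len(a))  # sorts in place -> [1, 1, 2, 3, 4, 5, 6, 9]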
| 235
| 1
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase )
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : int ) ->List[str]:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
requires_backends(self , '''decord''' )
self.check_model_type(UpperCAmelCase__ )
    def lowerCAmelCase__ ( self : Dict , top_k : Any=None , num_frames : Optional[Any]=None , frame_sampling_rate : List[Any]=None ) ->str:
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
def __call__( self : Dict , UpperCAmelCase__ : Union[str, List[str]] , **UpperCAmelCase__ : int ) ->int:
return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase__ ( self : List[Any] , video : Optional[int] , num_frames : Optional[Any]=None , frame_sampling_rate : Tuple=1 ) ->str:
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def lowerCAmelCase__ ( self : List[str] , model_inputs : List[str] ) ->Tuple:
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def lowerCAmelCase__ ( self : Dict , model_outputs : Tuple , top_k : Optional[int]=5 ) ->Tuple:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 43
|
'''simple docstring'''
from collections.abc import Callable
def bisection( function : Callable[[float], float] , a : float , b : float ):
    '''simple docstring'''
    start = a
    end = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''' )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # loop until the interval is narrower than 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x : float ):
    '''simple docstring'''
    return x**3 - 2 * x - 5
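# --- illustration (added note): the worked value printed below ---
# For f(x) = x**3 - 2*x - 5, f(1) = -6 < 0 < f(1000), so [1, 1000] brackets the
# single real root, and bisection(f, 1, 1000) converges to x ≈ 2.0945515
# within the 1e-7 tolerance used above.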
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 43
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCAmelCase: Any =logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
__UpperCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCAmelCase = field(default=UpperCamelCase_ , metadata={"""help""": """Whether tp freeze the encoder."""} )
__UpperCAmelCase = field(default=UpperCamelCase_ , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class lowerCamelCase__ :
__UpperCAmelCase = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
__UpperCAmelCase = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
__UpperCAmelCase = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCAmelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCAmelCase = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
__UpperCAmelCase = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCAmelCase = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
__UpperCAmelCase = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
__UpperCAmelCase = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
__UpperCAmelCase = field(default=UpperCamelCase_ , metadata={"""help""": """Source language id for translation."""} )
__UpperCAmelCase = field(default=UpperCamelCase_ , metadata={"""help""": """Target language id for translation."""} )
__UpperCAmelCase = field(default=UpperCamelCase_ , metadata={"""help""": """# num_beams to use for evaluation."""} )
__UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def __snake_case ( __A ,__A ,__A ) -> Optional[int]:
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(SCREAMING_SNAKE_CASE__ ,os.path.join(SCREAMING_SNAKE_CASE__ ,F'''{split}_results.json''' ) )
def __snake_case ( ) -> Dict:
lowercase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase : List[Any] = parser.parse_args_into_dataclasses()
check_output_dir(SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" ,SCREAMING_SNAKE_CASE__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
lowercase : List[str] = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
assert hasattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
lowercase : str = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path ,from_tf=""".ckpt""" in model_args.model_name_or_path ,config=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ,)
# use task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE__ ,data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase : str = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE__ ,(MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase : Dict = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(SCREAMING_SNAKE_CASE__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase : List[str] = SeqaSeqDataset
# Get datasets
lowercase : List[Any] = (
dataset_class(
SCREAMING_SNAKE_CASE__ ,type_path="""train""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_train
else None
)
lowercase : int = (
dataset_class(
SCREAMING_SNAKE_CASE__ ,type_path="""val""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase : Optional[int] = (
dataset_class(
SCREAMING_SNAKE_CASE__ ,type_path="""test""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase : List[Any] = (
build_compute_metrics_fn(data_args.task ,SCREAMING_SNAKE_CASE__ ) if training_args.predict_with_generate else None
)
lowercase : Union[str, Any] = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE__ ,args=SCREAMING_SNAKE_CASE__ ,data_args=SCREAMING_SNAKE_CASE__ ,train_dataset=SCREAMING_SNAKE_CASE__ ,eval_dataset=SCREAMING_SNAKE_CASE__ ,data_collator=SeqaSeqDataCollator(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,)
lowercase : List[Any] = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase : Dict = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase : List[str] = train_result.metrics
lowercase : List[Any] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" ,SCREAMING_SNAKE_CASE__ ,training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,"""trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase : List[Any] = trainer.evaluate(metric_key_prefix="""val""" )
lowercase : int = data_args.n_val
lowercase : Union[str, Any] = round(metrics["""val_loss"""] ,4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" ,SCREAMING_SNAKE_CASE__ ,training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase : Tuple = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE__ ,metric_key_prefix="""test""" )
lowercase : Optional[Any] = test_output.metrics
lowercase : int = data_args.n_test
if trainer.is_world_process_zero():
lowercase : List[Any] = round(metrics["""test_loss"""] ,4 )
handle_metrics("""test""" ,SCREAMING_SNAKE_CASE__ ,training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE__ )
if training_args.predict_with_generate:
lowercase : Union[str, Any] = tokenizer.batch_decode(
test_output.predictions ,skip_special_tokens=SCREAMING_SNAKE_CASE__ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
lowercase : str = lmap(str.strip ,SCREAMING_SNAKE_CASE__ )
write_txt_file(SCREAMING_SNAKE_CASE__ ,os.path.join(training_args.output_dir ,"""test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(SCREAMING_SNAKE_CASE__ ,os.path.join(training_args.output_dir ,"""all_results.json""" ) )
return all_metrics
def __snake_case ( __A ) -> Tuple:
main()
if __name__ == "__main__":
main()
| 607
|
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome( s : str ):
    '''simple docstring'''
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal( s : str ):
    '''simple docstring'''
    end = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive( s : str ):
    '''simple docstring'''
    if len(s ) <= 1:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False
def is_palindrome_slice( s : str ):
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function( name : str ):
    '''simple docstring'''
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500_000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 464
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class a__ :
"""simple docstring"""
def __init__( self :int ):
lowercase = {}
def __UpperCAmelCase ( self :Optional[int] , lowercase__ :Optional[int] ):
lowercase = {}
def __UpperCAmelCase ( self :int , lowercase__ :Optional[Any] , lowercase__ :int , lowercase__ :Tuple ):
if nodea not in self.connections:
self.add_node(lowercase__ )
if nodea not in self.connections:
self.add_node(lowercase__ )
lowercase = probability
def __UpperCAmelCase ( self :List[str] ):
return list(self.connections )
def __UpperCAmelCase ( self :Union[str, Any] , lowercase__ :int ):
lowercase = 0
lowercase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase = Counter(graph.get_nodes() )
lowercase = start
for _ in range(SCREAMING_SNAKE_CASE__ ):
lowercase = graph.transition(SCREAMING_SNAKE_CASE__ )
visited[node] += 1
return visited
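# --- illustration (hypothetical input, added note): driving the random walk above ---
# Transitions are (from, to, probability) triples that should sum to 1 per node, e.g.
#   [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# Running the helper above for many steps returns a Counter of visits per node,
# which approximates the chain's stationary distribution.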
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( _snake_case , unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = XLNetTokenizer
A__ : str = XLNetTokenizerFast
A__ : str = True
A__ : int = True
def __UpperCAmelCase ( self :Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = XLNetTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self :List[str] ):
lowercase = '<s>'
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCAmelCase ( self :Optional[int] ):
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<eod>' )
self.assertEqual(len(lowercase__ ) , 1006 )
def __UpperCAmelCase ( self :Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __UpperCAmelCase ( self :Dict ):
lowercase = XLNetTokenizer(lowercase__ , keep_accents=lowercase__ )
lowercase = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , [285, 46, 10, 170, 382] )
lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowercase = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
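        # XLNet appends its special tokens at the end: id 4 is <sep> and id 3
        # is <cls>, so a single sequence becomes `A + [sep, cls]` and a pair
        # becomes `A + [sep] + B + [sep, cls]`, which the asserts above check.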
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowercase = {'input_ids': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            # `lowercase` is the expected_encoding dict defined under `# fmt: off` above
            expected_encoding=lowercase,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first search from `start`, appending each vertex after its children."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
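    # For the edge list above this prints ['c', 'd', 'e', 'b', 'a']: every
    # vertex appears after its children, so reversing the list yields the
    # conventional topological order a, b, e, d, c.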
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # a COCO validation image used to sanity-check the converted model
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the original DINO weights into the HuggingFace ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
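    # Example invocation (the script filename and output path are placeholders;
    # the model names accepted are the facebookresearch/dino torch hub entries,
    # e.g. dino_vitb16, dino_vits8):
    #
    #     python convert_dino_to_vit.py --model_name dino_vits8 \
    #         --pytorch_dump_folder_path ./dino_vits8_converted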
from __future__ import annotations
def max_sum_in_array(array: list, k: int) -> int:
    """Return the maximum sum of any k consecutive elements, using a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one step: drop array[i], add array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")

        # get_activation should return a fresh object each time, so the
        # attribute set on act1 must not leak onto act2
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
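# Quick sanity sketch of what test_gelu_10 verifies (hypothetical REPL session):
#     >>> x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
#     >>> get_activation("gelu_10")(x).max().item()
#     10.0   # gelu_10 matches GELU but clips the output to at most 10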
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
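# With the defaults above: num_patches_per_frame = (10 // 2) ** 2 = 25,
# seq_length = (2 // 2) * 25 = 25 and num_masks = int(0.9 * 25) = 22, so each
# video in the common tests has 22 masked and 3 visible patches.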
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (indexed on the sequence [0, 1, 1, 2, ...])."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
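# Worked example: solution(3) returns 12, since F(12) = 144 is the first
# Fibonacci term with three digits (F(11) = 89 has only two).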
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
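# Running this module writes two text files next to it, for example:
#     elgamal_pubkey.txt  -> "2048,<e_1>,<e_2>,<p>"
#     elgamal_privkey.txt -> "2048,<d>"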
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
__A : Any = self.get_tokenizer()
__A : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2, 3, 4, 5, 6] , )
__A : int = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
__A : List[str] = tokenizer.convert_tokens_to_string(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , "This is a test" )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            # `__A` is the expected_encoding dict defined under `# fmt: off` above
            expected_encoding=__A,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
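    # Note the input layout M2M100 uses: the source text is encoded as
    # [src_lang_code] + tokens + [eos] (128022 is the "en" code above), while
    # the target language is enforced at generation time via
    # `forced_bos_token_id` (128006, the "ar" code).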
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
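# Minimal usage sketch: the defaults reproduce the ViT-B/16 geometry used by
# sayakpaul/vit-msn-base; pass overrides for smaller variants, e.g.
#
#     config = ViTMSNConfig()                                     # 768 hidden, 12 layers
#     config_small = ViTMSNConfig(hidden_size=384, num_attention_heads=6)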
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque so that colliding values chain together
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
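# Minimal usage sketch (the constructor arguments are an assumption based on
# the HashTable base class in this package, which takes a table size and an
# optional charge factor):
#
#     ht = HashTableWithLinkedList(3, charge_factor=2)
#     for value in (10, 20, 30):
#         ht.insert_data(value)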
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class _UpperCAmelCase ( __a):
__a : List[Any] = VOCAB_FILES_NAMES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Any = PegasusTokenizer
__a : int = ["""input_ids""", """attention_mask"""]
def __init__( self , _A=None , _A=None , _A="<pad>" , _A="</s>" , _A="<unk>" , _A="<mask_2>" , _A="<mask_1>" , _A=None , _A=1_03 , **_A , ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(a_ , a_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(a_ )}, but is'''
f''' {type(a_ )}''' )
_UpperCAmelCase : List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(a_ ) , self.offset - 1 )
]
if len(set(a_ ) ) != len(a_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_UpperCAmelCase : Optional[Any] = additional_special_tokens_extended
else:
_UpperCAmelCase : Optional[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
a_ , tokenizer_file=a_ , pad_token=a_ , eos_token=a_ , unk_token=a_ , mask_token=a_ , mask_token_sent=a_ , offset=a_ , additional_special_tokens=a_ , **a_ , )
_UpperCAmelCase : str = vocab_file
_UpperCAmelCase : List[str] = False if not self.vocab_file else True
def __snake_case ( self , _A ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def __snake_case ( self , _A , _A = None , _A = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(a_ )
elif token_ids_a is None:
return self._special_token_mask(a_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __snake_case ( self , _A , _A=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __snake_case ( self , _A , _A = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
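# Usage sketch (added for illustration; downloads pretrained files from the Hub):
#
# >>> from transformers import PegasusTokenizerFast
# >>> tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# >>> ids = tokenizer("PEGASUS is pre-trained with gap sentences.")["input_ids"]
# >>> ids[-1] == tokenizer.eos_token_id  # build_inputs_with_special_tokens appends </s>
# True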
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"],
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
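# Usage sketch (added for illustration; requires `sentencepiece`, `torch` and Hub access):
#
# >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
# >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
# >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
# >>> encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
# >>> generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
# >>> tokenizer.batch_decode(generated, skip_special_tokens=True)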
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
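# Note on the lazy-module pattern (added for illustration): at import time only the
# `_import_structure` mapping is built. A statement such as
# `from transformers.models.data2vec import Data2VecTextConfig` therefore only
# imports `configuration_data2vec_text` when the attribute is first accessed,
# via `_LazyModule.__getattr__`.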
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # head masking is not supported by this test yet
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
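# Usage sketch (added for illustration): each dataclass above plugs into
# `HfArgumentParser` to build a CLI; the flag value shown is arbitrary.
#
# >>> from transformers import HfArgumentParser
# >>> parser = HfArgumentParser(TrainingArguments)
# >>> args = parser.parse_args_into_dataclasses(["--train_batch_size", "4"])[0]
# >>> args.train_batch_size
# 4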
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def snake_case_ ( A_ : Union[str, Any], A_ : Optional[int]=7 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = None
if token is not None:
_lowerCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_lowerCamelCase : Any = "636036"
_lowerCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_lowerCamelCase : Dict = requests.get(lowercase_, headers=lowercase_ ).json()
return result["workflow_runs"]
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_lowerCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_lowerCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def snake_case_ ( A_ : str, A_ : List[str], A_ : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_lowerCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_, token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_lowerCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_, artifact_url=lowercase_, output_dir=lowercase_, token=lowercase_ )
def snake_case_ ( A_ : str, A_ : Dict, A_ : int ):
'''simple docstring'''
get_last_daily_ci_artifacts(lowercase_, lowercase_, lowercase_ )
_lowerCamelCase : Dict = {}
for artifact_name in artifact_names:
_lowerCamelCase : Union[str, Any] = os.path.join(lowercase_, F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_lowerCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_lowerCamelCase : int = f.read().decode('''UTF-8''' )
return results
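# Usage sketch (added for illustration; `GITHUB_TOKEN` and the artifact name
# "prev_ci_results" are placeholders, not values from the original script):
#
# >>> import os
# >>> reports = get_last_daily_ci_reports(
# ...     artifact_names=["prev_ci_results"], output_dir=".", token=os.environ["GITHUB_TOKEN"]
# ... )
# >>> sorted(reports)  # one entry per downloaded artifact zip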
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
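# Worked example (added for illustration): with the default scale_factor=8 the block
# size is 8**2 = 64, so height=768 gives 768 // 64 = 12 blocks -> 12 * 8 = 96 latent
# rows, while a non-multiple such as height=780 rounds up to 13 blocks -> 104.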
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations of `nums` by rotating the head element.

    >>> from itertools import permutations
    >>> numbers = [1, 2, 3]
    >>> all(list(nums) in permute(numbers) for nums in permutations(numbers))
    True
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations of `nums` using in-place swaps and backtracking.

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack


    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for transformer encoders: a single linear projection."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
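# Usage sketch (added for illustration; requires `torch`):
#
# >>> import torch
# >>> head = ClassificationHead(class_size=5, embed_size=768)
# >>> head(torch.randn(2, 768)).shape
# torch.Size([2, 5])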
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs abstractive summarization model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
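# Usage sketch (added for illustration):
#
# >>> config = BertAbsConfig(dec_layers=8)
# >>> config.dec_layers
# 8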
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **kwargs) -> Tuple:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
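
# Standalone usage sketch (added; tensor shapes are illustrative): drive the
# scheduler directly, with a dummy residual standing in for a model's output.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # placeholder for a denoising model's prediction
    sample = scheduler.step(residual, t, sample).prev_sample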
| 667
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
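
# Illustration of the lazy-import behavior above (added; assumes transformers
# with torch installed): attribute access triggers the deferred module import.
from transformers.models.git import GitConfig

config = GitConfig()  # importing GitConfig resolves configuration_git lazily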
| 515
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
super().setUp()
# fmt: off
__a = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__a = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__a = {"""unk_token""": """<unk>"""}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCamelCase_ ( self : Dict , **__lowercase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : str ):
'''simple docstring'''
__a = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__a = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a , __a = self.get_input_output_texts(__lowercase )
__a = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__a = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.get_tokenizer()
# Testing tokenization
__a = """こんにちは、世界。 こんばんは、㔺界。"""
__a = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__a = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__a = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__a = tokens + [tokenizer.unk_token]
__a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__a = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = self.get_tokenizer()
# Testing tokenization
__a = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__a = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__a = tokenizer.encode(__lowercase )
__a = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__a = """こんにちは、世界。"""
__a = """こんばんは、㔺界。😀"""
__a = """こんにちは、世界。こんばんは、世界。😀"""
__a = tokenizer.encode(prefix_text + input_text )
__a = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__a = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__a = tokenizer.decode(__lowercase )
__a = tokenizer.decode(__lowercase )
__a = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__a = """こんにちは、世界。"""
__a = """こんばんは、㔺界。😀"""
__a = len(tokenizer.encode(__lowercase ) ) - 2
__a = len(tokenizer.encode(__lowercase ) ) - 2
__a = [1] + [0] * (len_prefix + len_text + 1)
__a = [1] * (len_prefix + len_text + 1) + [0]
__a = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__a = tokenizer(prefix_text + input_text ).token_type_ids
__a = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__a = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__a = tokenizer.encode("""あンいワ""" )
__a = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__a = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__a = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__a = tokenizer(__lowercase , padding=__lowercase )
__a = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__a = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__a = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__a = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# tokenizer has no padding token
pass
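
# Usage sketch mirroring the @slow tests above (added; needs Hub access):
# GPTSAN-japanese keeps `prefix_text` and `text` apart with a SEG token.
from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
ids = tokenizer.encode("いワ", prefix_text="あン")
print(tokenizer.decode(ids))  # the decoded text reads "あンいワ"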
| 225
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowercase = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
A_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
A_ = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase__ , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
A_ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=None ) -> Any:
"""simple docstring"""
A_ = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
A_ = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        A_ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 )
A_ = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
A_ = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(lowerCamelCase__ , '''w''' , newline='''\n''' ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , '''r''' ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A_ = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , lowerCamelCase__ ) , )
# Copy consistency with a really long name
A_ = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , lowerCamelCase__ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , lowerCamelCase__ ) , )
def UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
A_ = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
A_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
A_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
A_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
A_ ,A_ = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme['''format_model_list'''] )
self.assertFalse(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
A_ ,A_ = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCamelCase__ )
A_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
A_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
A_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
A_ ,A_ = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
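
# Direct-use sketch of the utility under test (added): is_copy_consistent
# returns the list of "# Copied from" blocks that drifted from their source;
# an empty list means the file is consistent.
diffs = check_copies.is_copy_consistent("src/transformers/models/bert/modeling_bert.py")
assert diffs == []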
| 712
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
A_ = '''lm_head'''
A_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
A_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
A_ = True
else:
for key, mapped_key in MAPPING.items():
A_ = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
A_ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
A_ = '''weight_g'''
elif "weight_v" in name:
A_ = '''weight_v'''
elif "bias" in name:
A_ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = '''weight'''
else:
A_ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = full_name.split('''conv_layers.''' )[-1]
A_ = name.split('''.''' )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
if config_path is not None:
A_ = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
A_ = UniSpeechConfig()
if is_finetuned:
if dict_path:
A_ = Dictionary.load_from_json(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A_ = target_dict.pad_index
A_ = target_dict.bos_index
A_ = target_dict.eos_index
A_ = len(target_dict.symbols )
A_ = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
A_ = target_dict.indices
# fairseq has the <pad> and <s> switched
A_ = 42
A_ = 43
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
A_ = True if config.feat_extract_norm == '''layer''' else False
A_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
A_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A_ = UniSpeechForCTC(SCREAMING_SNAKE_CASE )
else:
A_ = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE )
if is_finetuned:
A_ ,A_ ,A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
A_ ,A_ ,A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
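
# Example invocation of the conversion script above (added; the script filename
# and all paths are placeholders):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path ./unispeech-converted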
| 563
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ = frozenset([] )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCAmelCase , )
UpperCAmelCase : int = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
UpperCAmelCase : Union[str, Any] = CLIPTextModel(_lowerCAmelCase )
UpperCAmelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self : List[Any] , lowercase : Any , lowercase : List[Any]=0 ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
UpperCAmelCase : List[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase : Tuple = torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : str = StableDiffusionInpaintPipeline(**_lowerCAmelCase )
UpperCAmelCase : Dict = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase : int = sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Dict = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
UpperCAmelCase : Union[str, Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase : int = StableDiffusionInpaintPipeline.from_pretrained(_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase : Any = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Tuple = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type="np" , )
UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
UpperCAmelCase : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase : Any = StableDiffusionInpaintPipeline.from_pretrained(
_lowerCAmelCase , torch_dtype=torch.floataa , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase : Union[str, Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase : Any = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type="np" , )
UpperCAmelCase : str = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase : Union[str, Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase : Any = PNDMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
UpperCAmelCase : int = StableDiffusionInpaintPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , scheduler=_lowerCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : Union[str, Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[Any] = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
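
# Minimal inference sketch distilled from the slow tests above (added; needs a
# CUDA GPU and Hub access; `init_image` and `mask_image` are PIL images that
# are assumed to be loaded beforehand).
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.to("cuda")
result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,       # image to edit
    mask_image=mask_image,  # white pixels mark the region to repaint
).images[0]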
| 595
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[int] = 0
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE: Optional[Any] = Path(_lowerCAmelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE: List[str] = Path(_lowerCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCAmelCase , '''w''' ) )
__SCREAMING_SNAKE_CASE: Dict = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE: int = Path(_lowerCAmelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE: Tuple = Path(_lowerCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCAmelCase , '''w''' ) )
__SCREAMING_SNAKE_CASE: str = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE: int = CLIPConfig()
            # Create a dummy config file with image_processor_type
__SCREAMING_SNAKE_CASE: Any = Path(_lowerCAmelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE: Optional[Any] = Path(_lowerCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__SCREAMING_SNAKE_CASE: Any = AutoImageProcessor.from_pretrained(_lowerCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
__SCREAMING_SNAKE_CASE: List[str] = CLIPImageProcessor(**_lowerCAmelCase )
# save in new folder
model_config.save_pretrained(_lowerCAmelCase )
config.save_pretrained(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
# make sure private variable is not incorrectly saved
__SCREAMING_SNAKE_CASE: int = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE: Optional[int] = Path(_lowerCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCAmelCase , '''w''' ) , )
__SCREAMING_SNAKE_CASE: int = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowerCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
__SCREAMING_SNAKE_CASE: Tuple = AutoImageProcessor.from_pretrained('''clip-base''' )
def snake_case_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowerCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__SCREAMING_SNAKE_CASE: Dict = AutoImageProcessor.from_pretrained(_lowerCAmelCase , revision='''aaaaaa''' )
def snake_case_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowerCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__SCREAMING_SNAKE_CASE: int = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def snake_case_ ( self ):
"""simple docstring"""
with self.assertRaises(_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = AutoImageProcessor.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def snake_case_ ( self ):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _lowerCAmelCase )
AutoImageProcessor.register(_lowerCAmelCase , _lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoImageProcessor.register(_lowerCAmelCase , _lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE: Optional[int] = Path(_lowerCAmelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE: Union[str, Any] = Path(_lowerCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCAmelCase , '''w''' ) )
__SCREAMING_SNAKE_CASE: List[str] = CustomImageProcessor.from_pretrained(_lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = AutoImageProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ):
"""simple docstring"""
class a ( __lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = True
try:
AutoConfig.register('''custom''' , _lowerCAmelCase )
AutoImageProcessor.register(_lowerCAmelCase , _lowerCAmelCase )
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE: str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE: Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE: Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_lowerCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
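
# Registration sketch mirroring the try/finally pattern above (added): once a
# custom config class is mapped to a custom image processor, the auto-API can
# resolve it; clean the registries up afterwards exactly as the tests do.
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)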
| 202
| 0
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def A__ ( self: Union[str, Any] ) -> Any:
super().setUp()
UpperCAmelCase_ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: str ) -> List[str]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ = """UNwant\u00E9d,running"""
UpperCAmelCase_ = """unwanted, running"""
return input_text, output_text
def A__ ( self: str ) -> Tuple:
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase_ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[7, 4, 5, 10, 8, 9] )
def A__ ( self: int ) -> Tuple:
pass
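
# Usage sketch against a released checkpoint (added; needs Hub access):
from transformers import LayoutLMTokenizer

tok = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
print(tok.tokenize("UNwant\u00E9d,running"))  # WordPiece split as in the test above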
| 700
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string: str) -> bool:
        '''simple docstring'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''')
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
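
# Example invocation (added; the script filename and all paths are placeholders):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny-diffusers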
| 322
| 0
|
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'tapas'
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Tuple=30522 , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3072 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1024 , __SCREAMING_SNAKE_CASE : int=[3, 256, 256, 2, 256, 256, 10] , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : int=1e-12 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Dict=10.0 , __SCREAMING_SNAKE_CASE : List[str]=0 , __SCREAMING_SNAKE_CASE : List[str]=1.0 , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=1.0 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]="ratio" , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : int=64 , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> int:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =hidden_act
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_sizes
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
# Fine-tuning task hyperparameters
__UpperCAmelCase =positive_label_weight
__UpperCAmelCase =num_aggregation_labels
__UpperCAmelCase =aggregation_loss_weight
__UpperCAmelCase =use_answer_as_supervision
__UpperCAmelCase =answer_loss_importance
__UpperCAmelCase =use_normalized_answer_loss
__UpperCAmelCase =huber_loss_delta
__UpperCAmelCase =temperature
__UpperCAmelCase =aggregation_temperature
__UpperCAmelCase =use_gumbel_for_cells
__UpperCAmelCase =use_gumbel_for_aggregation
__UpperCAmelCase =average_approximation_function
__UpperCAmelCase =cell_selection_preference
__UpperCAmelCase =answer_loss_cutoff
__UpperCAmelCase =max_num_rows
__UpperCAmelCase =max_num_columns
__UpperCAmelCase =average_logits_per_cell
__UpperCAmelCase =select_one_column
__UpperCAmelCase =allow_empty_column_selection
__UpperCAmelCase =init_cell_selection_weights_to_zero
__UpperCAmelCase =reset_position_index_per_cell
__UpperCAmelCase =disable_per_token_loss
# Aggregation hyperparameters
__UpperCAmelCase =aggregation_labels
__UpperCAmelCase =no_aggregation_label_index
if isinstance(self.aggregation_labels , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase ={int(__SCREAMING_SNAKE_CASE ): v for k, v in aggregation_labels.items()}
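# Usage sketch (added for illustration; the values are arbitrary, not from the
# original file). Any keyword not overridden keeps the defaults above, e.g. a
# WTQ-style fine-tuning setup with four aggregation operators:
#
#     config = TapasConfig(
#         num_aggregation_labels=4,
#         use_answer_as_supervision=True,
#     )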
| 68
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
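# A few illustrative inputs and expected outputs (added; not in the original
# module), following the regex above:
#   is_sri_lankan_phone_number("+94773283048")  -> True
#   is_sri_lankan_phone_number("0718382399")    -> True
#   is_sri_lankan_phone_number("0912343221")    -> False  (second digit must be 7)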
| 431
| 0
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number that can be formed by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
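# Worked examples (added for illustration): delete one digit, keep the maximum
# of the remaining candidates.
#   remove_digit(152)   -> 52  (candidates: 52, 12, 15)
#   remove_digit(-290)  -> 90  (sign is dropped via abs(); candidates: 90, 20, 29)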
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 700
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 428
| 0
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
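# Small illustrative check of the SQuAD-style metrics above (added; not part
# of the original module):
#   normalize_answer("The Cat!")          -> "cat"
#   f1_score("the cat sat", "a cat sat")  -> 1.0   (articles are stripped)
#   exact_match_score("The Cat", "cat")   -> True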
| 368
|
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the line number of the base/exponent pair with the greatest value."""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
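# Why x * log10(a) works (added note): comparing a**x directly would require
# building astronomically large integers; since log10 is monotonic,
# a**x > b**y  <=>  x * log10(a) > y * log10(b), which stays in float range.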
if __name__ == "__main__":
print(solution())
| 109
| 0
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Compute the Möbius function μ(n)."""
    factors = prime_factors(n)
    if is_square_free(n):
        return -1 if len(factors) % 2 else 1
    return 0
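# Illustrative values (added; follow directly from the definition above):
#   mobius(4) -> 0   (4 = 2*2 is not square-free)
#   mobius(6) -> 1   (6 = 2*3, an even number of distinct prime factors)
#   mobius(5) -> -1  (a single prime factor)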
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
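# For orientation (added comment, assuming the helpers' documented behaviour):
# 0.85 is the Jaccard similarity threshold used by the MinHash index, and each
# cluster element is a dict describing one near-duplicate file, including the
# "copies" count and the "is_extreme" flag asserted above.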
| 593
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mbart"] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mbart"] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_mbart"] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
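# Illustrative effect of the lazy module (added comment): replacing the module
# in sys.modules defers the heavy framework imports until first access, e.g.
#   from transformers.models.mbart import MBartConfig  # cheap
#   from transformers.models.mbart import MBartModel   # triggers the torch-backed import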
| 445
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
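# Shape convention (added example): floats_list((2, 3)) returns a 2-element
# list of 3 random floats each, e.g. [[0.13, 0.87, 0.42], [0.91, 0.05, 0.66]].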
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 445
| 1
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 718
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Params
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 518
| 0
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
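# Note on the weight-norm dance in load_weights (added comment): weight-normed
# Conv1d layers store a weight_g/weight_v pair instead of a plain weight, so
# the converter first applies weight norm to the fresh model, copies both
# tensors from the original checkpoint, then removes weight norm to bake the
# values into ordinary weights.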
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 505
|
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer decimal number to a binary string prefixed with 0b."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 505
| 1
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
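# Worked example (added): the point-wise transform reduces to c + level, so
# with level=100 a pixel value of 50 maps to 128 + 100 + (50 - 128) = 150.
# PIL evaluates this function over a 256-entry lookup table, one entry per
# possible input value, when building the new image.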
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 411
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 411
| 1
|
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 464
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
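# Usage sketch (added for illustration; the small values are arbitrary):
#   config = CTRLConfig()                      # the 48-layer, 1280-dim default
#   small = CTRLConfig(n_layer=2, n_embd=128)  # a toy variant for testing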
| 325
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
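# Illustrative usage (added example, kept as a comment): this packaged builder is what
# backs `load_dataset` for pickled pandas DataFrames, e.g.:
#     ds = datasets.load_dataset("pandas", data_files="data.pkl", split="train")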
| 118
|
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law (V = I * R) to any two given electrical values and return a
    name/value pair for the quantity that was passed in as 0.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
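    # Illustrative usage (added example): exactly one of the three quantities is 0
    # and the function solves for it via V = I * R.
    print(ohms_law(voltage=10, current=0, resistance=5))  # -> {'current': 2.0}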
| 118
| 1
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
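# Added note (hedged): these tests reach the Hugging Face Hub, so they are normally run
# online; something like `python -m pytest -k GetFromCacheTests` from the repo root
# (exact test-file path is an assumption) exercises just this class.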
| 15
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires a header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the fourth (alpha) channel is dropped by do_convert_rgb
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 478
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
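# Illustrative effect of the lazy module (added, kept as a comment): importing the
# package is cheap, and the heavy torch/flax modules load only on first attribute access:
#     from transformers.models import longt5   # no torch import yet
#     model_cls = longt5.LongT5Model            # triggers loading modeling_longt5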
| 402
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 402
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
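# Illustrative invocation (added, hedged; the file name and CSV layout are assumptions):
#     python run_tf_text_classification.py \
#         --model_name_or_path bert-base-cased \
#         --train_file train.csv --dev_file dev.csv \
#         --label_column_id 0 --output_dir ./out --do_train --do_eval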
| 300
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
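    # Illustrative usage (added example): 0 marks a free cell and 1 marks a wall;
    # the solver prints the 0/1 path matrix row by row when a path exists.
    solve_maze([[0, 1, 0], [0, 0, 0], [1, 0, 0]])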
| 300
| 1
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 581
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 581
| 1
|
"""simple docstring"""
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
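    # Added example: a strictly decreasing array of length n attains the maximum
    # possible count of n * (n - 1) / 2 inversions.
    print(count_inversions_bf([4, 3, 2, 1]))  # -> 6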
| 155
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 566
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
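# Illustrative invocation (added, hedged; file name is an assumption), fine-tuning on
# the SWAG dataset from the Hub:
#     python run_swag.py --model_name_or_path bert-base-cased \
#         --output_dir ./swag_out --do_train --do_eval --pad_to_max_length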
| 715
|
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters, arranged
    differently (ignoring case and whitespace).
    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 46
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images (here a single random RGB image)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
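# Hedged note on the pattern exercised above: CLIPProcessor simply routes text
# to its tokenizer and images to its image processor, so a combined call
#     processor(text=..., images=..., return_tensors="np")
# yields exactly input_ids, attention_mask and pixel_values -- the
# model_input_names checked in the last test.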
| 13
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
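# Minimal sketch of the jit pattern in the slow tests above (hedged,
# illustrative; assumes a Flax `model` and JAX-tensor `tokens` already exist):
#
#     @jax.jit
#     def forward(**kwargs):
#         return model(**kwargs)
#
#     forward(**tokens).block_until_ready()  # wait for the async result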
| 195
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
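# Hedged note on the fixture above: CTRL's BPE marks non-final word pieces with
# a trailing "@@", so "react" tokenizes to re@@ a@@ c@@ t and "readapt" to
# re@@ adapt; joining the pieces and stripping "@@ " reverses the split.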
| 106
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
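# For reference, the special-token layouts the methods above implement (the
# standard BARThez/CamemBERT convention, hedged): a single sequence becomes
# <s> A </s>, a pair becomes <s> A </s></s> B </s>, and token_type_ids are all
# zeros in both cases.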
| 106
| 1
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[str] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs, ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state
    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int, ...]) -> jnp.ndarray:
    """Right-pad x with singleton dims so it broadcasts against `shape`."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Glide cosine schedule: discretize alpha_bar(t) into per-step betas."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
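# Worked example of the cosine schedule above (hedged, approximate numbers):
# with num_diffusion_timesteps=1000 the first betas are tiny (roughly 1e-5 to
# 1e-4) and they grow smoothly toward max_beta near t=1, which is exactly the
# clipping that min(..., max_beta) performs.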
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t), broadcast to the sample shape."""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state, original_samples, noise, timesteps):
    """Closed-form DDPM forward process: x_t = sqrt(a_bar)*x_0 + sqrt(1-a_bar)*eps."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):
    """v-prediction target: v = sqrt(a_bar)*eps - sqrt(1-a_bar)*x_0."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
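# Illustrative use of the helpers above (hedged; assumes a CommonSchedulerState
# `state` built from a scheduler and integer `timesteps` of shape (batch,)):
#
#     noisy = add_noise_common(state, x0, eps, timesteps)
#     # = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps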
| 87
|
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter implemented in direct form I."""

    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
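# A short, hedged usage sketch (the coefficients and helper name are an
# arbitrary one-pole example, not a designed filter):
# y[n] = 0.05*x[n] + 0.05*x[n-1] + 0.9*y[n-1].
def _demo_iir_filter() -> list[float]:
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.9], [0.05, 0.05])
    # impulse response: feed a 1.0 followed by zeros
    return [filt.process(x) for x in (1.0, 0.0, 0.0, 0.0)]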
| 596
| 0
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 524
|
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
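# Worked example (hedged, illustrative): kinetic_energy(10, 10)
# = 0.5 * 10 * |10| * |10| = 500.0 joules.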
| 524
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **kwargs):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 6
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
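# Hedged note on the pattern above: `_import_structure` maps submodule names to
# their exported symbols; at runtime the package module is swapped for a
# _LazyModule that only imports a submodule the first time one of its
# attributes is accessed, keeping top-level package imports cheap.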
| 282
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 BPE tokenizer implemented as a Keras layer."""

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 681
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same backwards (negative numbers never do)."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
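# Illustrative calls (hedged): is_palindrome(121) -> True,
# is_palindrome(123) -> False, is_palindrome(-121) -> False
# (the sign breaks the symmetry).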
| 681
| 1
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """Hash table whose slots chain colliding entries in a deque."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
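# Hedged note on the collision strategy above: each slot holds a deque, so a
# colliding key is prepended to that slot's chain; only when the chain reaches
# charge_factor and no slot is empty does resolution fall back to the parent's
# probing scheme.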
| 592
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91

    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
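# Hedged note on the slicing above: PyTorch's MultiheadAttention stores the
# q/k/v input projections as one stacked (3*d, d) matrix; with d=256 for DETR,
# rows 0:256 are the query projection, 256:512 the key projection, and the
# final 256 rows the value projection.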
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
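# Example invocation (hedged; the script filename is a placeholder):
#
#     python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#         --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub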
| 592
| 1
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 700
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target-language code."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
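

# Added example (hedged): round-tripping a vocab with the helpers above. The file
# name is hypothetical and the vocab is a toy stand-in for a real vocab.json.
def _example_json_roundtrip(path: str = "toy_vocab.json") -> bool:
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "▁hello": 4}
    save_json(vocab, path)
    return load_json(path) == vocab  # True if the round trip preserved the mapping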
| 36
|
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        # pick two random indices and swap the elements in place
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[b], data[a] = data[a], data[b]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
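

# Added note (hedged): the routine above swaps two random positions per iteration,
# which shuffles the list but is not the textbook Fisher-Yates algorithm and does not
# guarantee a uniform permutation. A sketch of the classic (Durstenfeld) variant,
# which walks the list backwards with one swap per element:
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data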
| 459
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np")
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np")
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 689
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib_moves, coins_distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
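

# Added example (hedged): a three-node tree holding 3 coins in the root; both
# leaves need one coin each, so two moves are required.
def _example_distribute_coins() -> int:
    root = TreeNode(3, TreeNode(0), TreeNode(0))
    return distribute_coins(root)  # -> 2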
| 689
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 280
|
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed equation, then reverse the Postfix result
    return (infix_2_postfix("".join(infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
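

# Added example (hedged): for the infix expression a+b*c the functions above yield
# the postfix form abc*+ and the prefix form +a*bc (the trace table is printed as
# a side effect of each call).
def _example_conversions() -> tuple[str, str]:
    return infix_2_postfix("a+b*c"), infix_2_prefix("a+b*c")  # -> ("abc*+", "+a*bc")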
| 280
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
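

# Added example (hedged): a minimal sketch showing that the ONNX config above exposes
# dynamic axes for the three standard inputs; "default" is assumed to be a valid
# OnnxConfig task here.
def _example_onnx_inputs():
    config = AlbertConfig()
    onnx_config = AlbertOnnxConfig(config, task="default")
    return list(onnx_config.inputs.keys())  # -> ["input_ids", "attention_mask", "token_type_ids"]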
| 721
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
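

# Added example (hedged): a toy sketch of the fairseq/spm alignment handled above.
# Fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>, so every "real" sentencepiece
# id is shifted by an offset of 1 (toy spm id 3 for "," becomes fairseq id 4).
def _example_fairseq_offset():
    fairseq_offset = 1
    spm_ids = {",": 3, ".": 4, "▁": 5}  # toy spm ids, not a real vocab
    fairseq_ids = {tok: i + fairseq_offset for tok, i in spm_ids.items()}
    return fairseq_ids  # -> {",": 4, ".": 5, "▁": 6}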
| 129
| 0
|
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this ratio as consumed

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
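

# Added example (hedged): a worked fractional-knapsack call. With profits [10, 9, 8],
# weights [5, 4, 3] and a 7 kg limit, the two densest items (8/3 and 9/4) fill the
# bag exactly, so the gain is 8 + 9 = 17.
def _example_calc_profit() -> float:
    return calc_profit([10, 9, 8], [5, 4, 3], 7)  # -> 17.0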
| 587
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
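

# Added example (hedged): a sanity check against a well-known date; the year 2000
# began on a Saturday, which the Doomsday computation above reproduces (2000 is a
# leap century year, so the leap anchors apply).
def _example_week_day() -> str:
    return get_week_day(2000, 1, 1)  # -> 'Saturday'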
| 587
| 1
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
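

# Added example (hedged): the recursion above computes s(n) = s(n-1)^2 - s(n-1) + 1
# with s(1) = 2, so the sequence starts 2, 3, 7, 43, 1807, ...
def _example_sylvester() -> list[int]:
    return [sylvester(n) for n in range(1, 6)]  # -> [2, 3, 7, 43, 1807]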
| 717
|
from itertools import count


def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 398
| 0
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 568
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ) -> None:
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 1
|
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* a v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
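

# Added note (hedged): for a Hermitian matrix the Rayleigh quotient is real and
# bounded by the extreme eigenvalues, lambda_min <= R(a, v) <= lambda_max for
# every nonzero v. A quick numerical check of those bounds with numpy:
def _example_rayleigh_bounds() -> bool:
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    v = np.array([[1.0], [2.0], [3.0]])
    eigs = np.linalg.eigvalsh(a)  # eigenvalues of a Hermitian matrix, ascending
    r = rayleigh_quotient(a, v).item()
    return eigs[0] <= r <= eigs[-1]  # -> True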
| 318
|
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 318
| 1
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 48
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class UpperCamelCase_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_a : Optional[int] = StableDiffusionAttendAndExcitePipeline
_a : Union[str, Any] = False
_a : Dict = TEXT_TO_IMAGE_PARAMS
_a : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __a ( cls : Tuple ):
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase )
@classmethod
def __a ( cls : Tuple ):
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase )
def __a ( self : Dict ):
torch.manual_seed(0 )
lowerCamelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase , )
lowerCamelCase_ : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , )
torch.manual_seed(0 )
lowerCamelCase_ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
lowerCamelCase_ : Any = CLIPTextModel(lowerCamelCase )
lowerCamelCase_ : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : List[Any]=0 ):
if str(lowerCamelCase ).startswith('mps' ):
lowerCamelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase_ : Dict = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def __a ( self : Union[str, Any] ):
lowerCamelCase_ : List[Any] = 'cpu'
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : List[str] = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase_ : List[Any] = self.get_dummy_inputs(lowerCamelCase )
lowerCamelCase_ : List[str] = pipe(**lowerCamelCase ).images
lowerCamelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase_ : Dict = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1E-3 )
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 364
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 590
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 590
| 1
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 580
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    """Check kruskal() against a hand-verified minimum spanning tree."""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
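    # Sanity arithmetic on the fixture itself (added check, not from the original
    # test): the eight expected MST edges have total weight 1+2+2+4+4+7+8+9 = 37.
    assert sum(weight for _, _, weight in expected) == 37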
| 580
| 1
|
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 86: smallest cuboid dimension M such that the number of
    cuboids with an integer-length shortest surface path exceeds ``limit``."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"{solution() = }")
| 715
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
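# Illustrative usage sketch (hypothetical call, not part of the original module):
# enforce the pinned tokenizers version at runtime before relying on it, e.g.
#   dep_version_check("tokenizers")
# which raises if the installed version violates dependency_versions_table.py.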
| 221
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed to leave exactly one coin on
    every node of the tree, where each move shifts one coin along one edge."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )

        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
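    # Quick usage sketch (illustrative, not from the original file): the tree
    # [0, 3, 0] needs three single-edge coin moves to reach one coin per node.
    example_root = TreeNode(0, TreeNode(3), TreeNode(0))
    print(distribute_coins(example_root))  # -> 3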
| 51
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
MODEL_MODES = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
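        # Illustrative arithmetic (hypothetical numbers): 10_000 examples with
        # train_batch_size=32, accumulate_grad_batches=1 on one GPU give an
        # effective batch of 32, i.e. 312.5 steps/epoch and ~937 steps over 3 epochs.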
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 178
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 710
|
"""simple docstring"""
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self) -> Dict[str, tf.TensorSpec]:
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
__A = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
__A = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 173
| 0
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
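# Hedged usage sketch (not part of the original test file): the same masked-LM
# check via the high-level pipeline API, with the model id taken from the
# integration test above; everything else is standard transformers usage.
from transformers import pipeline

fill_masker = pipeline("fill-mask", model="uw-madison/nystromformer-512")
predictions = fill_masker("the [MASK] of Belgium is Brussels")
print(predictions[0]["token_str"])  # expected to be "capital", matching the test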
| 241
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
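# Hedged usage sketch (not part of the original module): the _LazyModule wiring
# above keeps the package import cheap; torch-backed classes are only
# materialized on first attribute access (assumes transformers + torch installed).
from transformers.models import wavlm

config = wavlm.WavLMConfig()      # triggers only the configuration_wavlm import
model = wavlm.WavLMModel(config)  # first access here triggers the modeling_wavlm import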
| 241
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 13
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
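# Hedged usage sketch (not part of the original module): the add_prefix_space
# constraint enforced in _batch_encode_plus/_encode_plus above, in action.
from transformers import BartTokenizerFast

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding.input_ids)  # with add_prefix_space=False this call would raise the ValueError above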
| 185
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in `qs` match any window of strings in `ks`."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
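# Hedged usage sketch (not part of the original script): how set_partitions is
# typically consumed. The GPT-Neo checkpoint below is illustrative; any Flax
# model whose parameter names follow the rules above would work, and the assert
# in set_partitions flags any parameter the rules fail to cover.
from transformers import FlaxGPTNeoForCausalLM

model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
param_specs = set_partitions(model.params)  # same pytree shape, leaves are PartitionSpec or None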
| 185
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
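# Hedged usage note (not part of the original script); all paths below are
# illustrative placeholders:
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path   ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path    ./bigbird-pytorch \
#       --is_trivia_qa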
| 473
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 473
| 1
|
def UpperCAmelCase_ ( __UpperCAmelCase : list[list] ) -> list[list]:
SCREAMING_SNAKE_CASE_ = current_set.copy()
for row_index, row in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = row[0]
for column_index, column in enumerate(__UpperCAmelCase ):
if magnitude == 0:
SCREAMING_SNAKE_CASE_ = column
continue
SCREAMING_SNAKE_CASE_ = column / magnitude
# Subtract to cancel term
SCREAMING_SNAKE_CASE_ = current_set[0]
SCREAMING_SNAKE_CASE_ = [first_row]
SCREAMING_SNAKE_CASE_ = current_set[1::]
for row in current_set:
SCREAMING_SNAKE_CASE_ = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__UpperCAmelCase )
continue
for column_index in range(len(__UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
SCREAMING_SNAKE_CASE_ = final_set[0]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
SCREAMING_SNAKE_CASE_ = simplify(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = resultant
return final_set
def UpperCAmelCase_ ( __UpperCAmelCase : list[list] ) -> list:
if len(__UpperCAmelCase ) == 0:
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) + 1
if any(len(__UpperCAmelCase ) != _length for item in equations ):
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
for row in equations:
if any(not isinstance(__UpperCAmelCase , (int, float) ) for column in row ):
raise ValueError('solve_simultaneous() requires lists of integers' )
if len(__UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
SCREAMING_SNAKE_CASE_ = equations.copy()
if any(0 in row for row in data_set ):
SCREAMING_SNAKE_CASE_ = data_set.copy()
SCREAMING_SNAKE_CASE_ = []
for row_index, row in enumerate(__UpperCAmelCase ):
if 0 not in row:
SCREAMING_SNAKE_CASE_ = data_set.pop(__UpperCAmelCase )
break
if not full_row:
raise ValueError('solve_simultaneous() requires at least 1 full equation' )
data_set.insert(0 , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = data_set.copy()
SCREAMING_SNAKE_CASE_ = simplify(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = simplified[::-1]
SCREAMING_SNAKE_CASE_ = []
for row in simplified:
SCREAMING_SNAKE_CASE_ = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
SCREAMING_SNAKE_CASE_ = row.copy()[: len(__UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__UpperCAmelCase ) == 0:
solutions.append(0 )
continue
SCREAMING_SNAKE_CASE_ = temp_row[1::]
SCREAMING_SNAKE_CASE_ = temp_row[::-1]
for column_index, column in enumerate(__UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = []
for item in solutions:
final.append(float(round(__UpperCAmelCase , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 31
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
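# Hedged usage sketch (not part of the original module): driving a Longformer
# ONNX export with the config above through the transformers.onnx export helper.
from pathlib import Path

from transformers import AutoTokenizer, LongformerModel
from transformers.onnx import export

tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
onnx_config = LongformerOnnxConfig(model.config)
export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("longformer.onnx"))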
| 31
| 1
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 768,
'''430M''': 1_024,
'''1B5''': 2_048,
'''3B''': 2_560,
'''7B''': 4_096,
'''14B''': 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
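# Hedged usage note (not part of the original script); the repo and file names
# below are illustrative placeholders:
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-430m \
#       --checkpoint_file RWKV-4-Pile-430M.pth \
#       --output_dir ./rwkv-430m-hf \
#       --size 430M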
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = os.path.abspath(UpperCAmelCase )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
SCREAMING_SNAKE_CASE_ = tf.train.list_variables(UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
SCREAMING_SNAKE_CASE_ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
SCREAMING_SNAKE_CASE_ = name[1:]
# figure out how many levels deep the name is
SCREAMING_SNAKE_CASE_ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(UpperCAmelCase )
# read data
SCREAMING_SNAKE_CASE_ = tf.train.load_variable(UpperCAmelCase ,UpperCAmelCase )
names.append('''/'''.join(UpperCAmelCase ) )
arrays.append(UpperCAmelCase )
logger.info(f'''Read a total of {len(UpperCAmelCase ):,} layers''' )
# Sanity check
if len(set(UpperCAmelCase ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(UpperCAmelCase ) )})''' )
SCREAMING_SNAKE_CASE_ = list(set(UpperCAmelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(UpperCAmelCase ,UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = full_name.split('''/''' )
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = []
for i, m_name in enumerate(UpperCAmelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
SCREAMING_SNAKE_CASE_ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''embeddings''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''encoder''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''layer''' )
SCREAMING_SNAKE_CASE_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''pooler''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''token_type_embeddings''' )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append('''weight''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''attention''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''attention''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''output''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''attention''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''output''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''output''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''output''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''intermediate''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
SCREAMING_SNAKE_CASE_ = getattr(UpperCAmelCase ,'''weight''' )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
SCREAMING_SNAKE_CASE_ = '''.'''.join(UpperCAmelCase )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' ,UpperCAmelCase ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' ,UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
SCREAMING_SNAKE_CASE_ = array.transpose()
if pointer.shape == array.shape:
SCREAMING_SNAKE_CASE_ = torch.from_numpy(UpperCAmelCase )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
logger.info(f'''Loading model based on config from {config_path}...''' )
SCREAMING_SNAKE_CASE_ = BertConfig.from_json_file(UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = BertModel(UpperCAmelCase )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() ,UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
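# Hedged usage note (not part of the original script); paths are placeholders:
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#       --bert_config_file   ./tf2_ckpt/bert_config.json \
#       --pytorch_dump_path  ./bert-pytorch/pytorch_model.bin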
| 393
|
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's iterated method.

    Returns the interpolated value and the full table of intermediate values.
    """
    n = len(x_points)
    # q[j][i] holds, at column i, the value at x0 of the polynomial through points j-i+1 .. j
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
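# Hedged usage sketch (not part of the original module): the five points below
# lie on the line y = x + 5, so the interpolant evaluated at x0 = 5 returns 10.0.
value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(value)  # 10.0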
| 393
| 1
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=32 , _snake_case=2 , _snake_case=3 , _snake_case=16 , _snake_case=[32, 64, 128] , _snake_case=[1, 2, 1] , _snake_case=[2, 2, 4] , _snake_case=2 , _snake_case=2.0 , _snake_case=True , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case="gelu" , _snake_case=False , _snake_case=True , _snake_case=0.02 , _snake_case=1e-5 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=10 , _snake_case=8 , _snake_case=["stage1", "stage2"] , _snake_case=[1, 2] , ) -> List[str]:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Optional[Any] = embed_dim
UpperCAmelCase_ : str = hidden_sizes
UpperCAmelCase_ : Any = depths
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Optional[Any] = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = drop_path_rate
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Optional[Any] = use_absolute_embeddings
UpperCAmelCase_ : int = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Any = out_features
UpperCAmelCase_ : Any = out_indices
def _snake_case ( self) -> int:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self) -> Optional[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = FocalNetModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Optional[int] = model(_snake_case)
UpperCAmelCase_ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
UpperCAmelCase_ : Dict = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = FocalNetBackbone(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = FocalNetBackbone(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Optional[int] = model(_snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> Any:
UpperCAmelCase_ : int = FocalNetForMaskedImageModeling(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_snake_case)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[Any] = FocalNetForMaskedImageModeling(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ : Optional[int] = model(_snake_case)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> int:
UpperCAmelCase_ : str = self.type_sequence_label_size
UpperCAmelCase_ : str = FocalNetForImageClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : List[Any] = model(_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = FocalNetForImageClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ : Optional[int] = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _snake_case ( self) -> Optional[int]:
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a_, a_, unittest.TestCase ):
_lowerCamelCase : Any= (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_lowerCamelCase : int= (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : int= False
_lowerCamelCase : Any= False
_lowerCamelCase : Optional[Any]= False
_lowerCamelCase : Optional[Any]= False
_lowerCamelCase : Any= False
def _snake_case ( self) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = FocalNetModelTester(self)
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=_snake_case , embed_dim=37 , has_text_modality=_snake_case)
def _snake_case ( self) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self) -> str:
return
def _snake_case ( self) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def _snake_case ( self) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_snake_case)
def _snake_case ( self) -> List[Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case)
def _snake_case ( self) -> Any:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='FocalNet does not use inputs_embeds')
def _snake_case ( self) -> int:
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking')
def _snake_case ( self) -> Optional[int]:
pass
def _snake_case ( self) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Any = model_class(_snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear))
def _snake_case ( self) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(_snake_case)
UpperCAmelCase_ : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case)
def _snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case) -> List[str]:
UpperCAmelCase_ : str = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ : List[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(_snake_case) , _snake_case)
# FocalNet has a different seq_length
UpperCAmelCase_ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase_ : Any = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case) , _snake_case)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[int] = (
reshaped_hidden_states[0].view(_snake_case , _snake_case , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _snake_case ( self) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case)
def _snake_case ( self) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width))
@slow
def _snake_case ( self) -> Tuple:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = FocalNetModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
def _snake_case ( self) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(_snake_case)
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=_snake_case)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
@cached_property
def _snake_case ( self) -> List[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None
@slow
def _snake_case ( self) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(_snake_case)
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
UpperCAmelCase_ : Any = image_processor(images=_snake_case , return_tensors='pt').to(_snake_case)
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_snake_case)
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_166, -0.4_368, 0.2_191]).to(_snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class lowercase ( a_, unittest.TestCase ):
_lowerCamelCase : List[Any]= (FocalNetBackbone,) if is_torch_available() else ()
_lowerCamelCase : List[Any]= FocalNetConfig
_lowerCamelCase : List[str]= False
def _snake_case ( self) -> str:
UpperCAmelCase_ : Optional[Any] = FocalNetModelTester(self)
| 471
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
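# Hedged usage sketch (not part of the original module): the attribute_map
# indirection lets generic config names resolve to GPTSAN-specific attributes.
config = GPTSanJapaneseConfig()
print(config.hidden_size)        # resolves to config.d_model
print(config.num_hidden_layers)  # resolves to config.num_layers (switch + ext layers)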
| 471
| 1
|